/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/capsicum.h>
#include <sys/counter.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/seqc.h>
#include <sys/sdt.h>
#include <sys/smr.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>
#include <ck_queue.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#ifdef INVARIANTS
#include <machine/_inttypes.h>
#endif

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <vm/uma.h>

static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Name cache");

SDT_PROVIDER_DECLARE(vfs);
SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, enter, duplicate, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE2(vfs, namecache, fullpath_smr, hit, "struct vnode *",
    "const char *");
SDT_PROBE_DEFINE4(vfs, namecache, fullpath_smr, miss, "struct vnode *",
    "struct namecache *", "int", "int");
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *",
    "char *", "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int",
    "struct vnode *", "char *");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit__negative,
    "struct vnode *", "char *");
SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE2(vfs, namecache, removecnp, hit, "struct vnode *",
    "struct componentname *");
SDT_PROBE_DEFINE2(vfs, namecache, removecnp, miss, "struct vnode *",
    "struct componentname *");
SDT_PROBE_DEFINE1(vfs, namecache, purge, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purge, batch, "int");
SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *");
SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, zap_negative, done, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE2(vfs, namecache, evict_negative, done, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE1(vfs, namecache, symlink, alloc__fail, "size_t");

SDT_PROBE_DEFINE3(vfs, fplookup, lookup, done, "struct nameidata", "int", "bool");
SDT_PROBE_DECLARE(vfs, namei, lookup, entry);
SDT_PROBE_DECLARE(vfs, namei, lookup, return);

/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 */
struct negstate {
	u_char neg_flag;
	u_char neg_hit;
};
_Static_assert(sizeof(struct negstate) <= sizeof(struct vnode *),
    "the state must fit in a union with a pointer without growing it");

struct namecache {
	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
	CK_SLIST_ENTRY(namecache) nc_hash;/* hash chain */
	struct vnode *nc_dvp;		/* vnode of parent of name */
	union {
		struct vnode *nu_vp;	/* vnode the name refers to */
		struct negstate nu_neg;	/* negative entry state */
	} n_un;
	u_char	nc_flag;		/* flag bits */
	u_char	nc_nlen;		/* length of name */
	char	nc_name[0];		/* segment name + nul */
};

/*
 * struct namecache_ts repeats struct namecache layout up to the
 * nc_nlen member.
 * struct namecache_ts is used in place of struct namecache when time(s) need
 * to be stored.  The nc_dotdottime field is used when a cache entry is mapping
 * both a non-dotdot directory name plus dotdot for the directory's
 * parent.
 *
 * See below for alignment requirement.
 */
struct namecache_ts {
	struct	timespec nc_time;	/* timespec provided by fs */
	struct	timespec nc_dotdottime;	/* dotdot timespec provided by fs */
	int	nc_ticks;		/* ticks value when entry was added */
	int	nc_pad;
	struct namecache nc_nc;
};

TAILQ_HEAD(cache_freebatch, namecache);

/*
 * At least mips n32 performs 64-bit accesses to timespec as found
 * in namecache_ts and requires them to be aligned.  Since others
 * may be in the same spot suffer a little bit and enforce the
 * alignment for everyone.  Note this is a nop for 64-bit platforms.
 */
#define CACHE_ZONE_ALIGNMENT	UMA_ALIGNOF(time_t)

/*
 * TODO: the initial value of CACHE_PATH_CUTOFF was inherited from the
 * 4.4 BSD codebase.  Later on struct namecache was tweaked to become
 * smaller and the value was bumped to retain the total size, but it
 * was never re-evaluated for suitability.  A simple test counting
 * lengths during package building shows that the value of 45 covers
 * about 86% of all added entries, reaching 99% at 65.
 *
 * Regardless of the above, use of dedicated zones instead of malloc may be
 * inducing additional waste.  This may be hard to address as said zones are
 * tied to VFS SMR.  Even if retaining them, the current split should be
 * re-evaluated.
 */
#ifdef __LP64__
#define	CACHE_PATH_CUTOFF	45
#define	CACHE_LARGE_PAD		6
#else
#define	CACHE_PATH_CUTOFF	41
#define	CACHE_LARGE_PAD		2
#endif

#define CACHE_ZONE_SMALL_SIZE		(offsetof(struct namecache, nc_name) + CACHE_PATH_CUTOFF + 1)
#define CACHE_ZONE_SMALL_TS_SIZE	(offsetof(struct namecache_ts, nc_nc) + CACHE_ZONE_SMALL_SIZE)
#define CACHE_ZONE_LARGE_SIZE		(offsetof(struct namecache, nc_name) + NAME_MAX + 1 + CACHE_LARGE_PAD)
#define CACHE_ZONE_LARGE_TS_SIZE	(offsetof(struct namecache_ts, nc_nc) + CACHE_ZONE_LARGE_SIZE)

_Static_assert((CACHE_ZONE_SMALL_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size");
_Static_assert((CACHE_ZONE_SMALL_TS_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size");
_Static_assert((CACHE_ZONE_LARGE_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size");
_Static_assert((CACHE_ZONE_LARGE_TS_SIZE % (CACHE_ZONE_ALIGNMENT + 1)) == 0, "bad zone size");
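
/*
 * For illustration: on LP64 the small zones thus hold names of up to
 * CACHE_PATH_CUTOFF (45) characters plus the terminating nul; anything
 * longer (up to NAME_MAX) lands in the large zones.  The asserts above
 * merely guarantee that all four sizes are multiples of the requested
 * alignment (CACHE_ZONE_ALIGNMENT is a mask, hence the "+ 1").
 */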

#define	nc_vp		n_un.nu_vp
#define	nc_neg		n_un.nu_neg

/*
 * Flags in namecache.nc_flag
 */
#define NCF_WHITE	0x01
#define NCF_ISDOTDOT	0x02
#define	NCF_TS		0x04
#define	NCF_DTS		0x08
#define	NCF_DVDROP	0x10
#define	NCF_NEGATIVE	0x20
#define	NCF_INVALID	0x40
#define	NCF_WIP		0x80

/*
 * Flags in negstate.neg_flag
 */
#define NEG_HOT		0x01

static bool	cache_neg_evict_cond(u_long lnumcache);

/*
 * Mark an entry as invalid.
 *
 * This is called before it starts getting deconstructed.
 */
static void
cache_ncp_invalidate(struct namecache *ncp)
{

	KASSERT((ncp->nc_flag & NCF_INVALID) == 0,
	    ("%s: entry %p already invalid", __func__, ncp));
	atomic_store_char(&ncp->nc_flag, ncp->nc_flag | NCF_INVALID);
	atomic_thread_fence_rel();
}

/*
 * Check whether the entry can be safely used.
 *
 * All places which elide locks are supposed to call this after they are
 * done with reading from an entry.
 */
#define cache_ncp_canuse(ncp)	({					\
	struct namecache *_ncp = (ncp);					\
	u_char _nc_flag;						\
									\
	atomic_thread_fence_acq();					\
	_nc_flag = atomic_load_char(&_ncp->nc_flag);			\
	__predict_true((_nc_flag & (NCF_INVALID | NCF_WIP)) == 0);	\
})

/*
 * Like the above but also checks NCF_WHITE.
 */
#define cache_fpl_neg_ncp_canuse(ncp)	({				\
	struct namecache *_ncp = (ncp);					\
	u_char _nc_flag;						\
									\
	atomic_thread_fence_acq();					\
	_nc_flag = atomic_load_char(&_ncp->nc_flag);			\
	__predict_true((_nc_flag & (NCF_INVALID | NCF_WIP | NCF_WHITE)) == 0);	\
})
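
/*
 * The release fence in cache_ncp_invalidate pairs with the acquire fences
 * above.  A rough interleaving sketch of the intended use:
 *
 *	lockless lookup				teardown
 *	---------------				--------
 *	vp = ncp->nc_vp;			nc_flag |= NCF_INVALID;
 *	atomic_thread_fence_acq();		atomic_thread_fence_rel();
 *	flag = ncp->nc_flag;			overwrite/free the entry
 *
 * If the lookup observes NCF_INVALID (and NCF_WIP) to be clear, everything
 * it read beforehand came from an entry which was not yet being torn down.
 */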

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  The cache is indexed by hash value
 * obtained from (dvp, name) where dvp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry, (i.e. for a name that is known NOT to
 * exist) the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 *
 * These locks are used (in the order in which they can be taken):
 * NAME		TYPE	ROLE
 * vnodelock	mtx	vnode lists and v_cache_dd field protection
 * bucketlock	mtx	for access to given set of hash buckets
 * neglist	mtx	negative entry LRU management
 *
 * It is legal to take multiple vnodelock and bucketlock locks.  The locking
 * order is lower address first.  Both are recursive.
 *
 * "." lookups are lockless.
 *
 * ".." and vnode -> name lookups require vnodelock.
 *
 * name -> vnode lookup requires the relevant bucketlock to be held for reading.
 *
 * Insertions and removals of entries require involved vnodes and bucketlocks
 * to be locked to provide safe operation against other threads modifying the
 * cache.
 *
 * Some lookups result in removal of the found entry (e.g. getting rid of a
 * negative entry with the intent to create a positive one), which poses a
 * problem when multiple threads reach that state.  Similarly, two different
 * threads can purge two different vnodes and try to remove the same name.
 *
 * If the already held vnode lock is lower than the second required lock, we
 * can just take the other lock.  However, in the opposite case, this could
 * deadlock.  As such, this is resolved by trylocking and, if that fails,
 * unlocking the first node, locking everything in order and revalidating
 * the state.
 */

VFS_SMR_DECLARE;

static SYSCTL_NODE(_vfs_cache, OID_AUTO, param, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Name cache parameters");

static u_int __read_mostly	ncsize; /* the size as computed on creation or resizing */
SYSCTL_UINT(_vfs_cache_param, OID_AUTO, size, CTLFLAG_RW, &ncsize, 0,
    "Total namecache capacity");

u_int ncsizefactor = 2;
SYSCTL_UINT(_vfs_cache_param, OID_AUTO, sizefactor, CTLFLAG_RW, &ncsizefactor, 0,
    "Size factor for namecache");

static u_long __read_mostly	ncnegfactor = 5; /* ratio of negative entries */
SYSCTL_ULONG(_vfs_cache_param, OID_AUTO, negfactor, CTLFLAG_RW, &ncnegfactor, 0,
    "Ratio of negative namecache entries");

/*
 * Negative entry % of namecache capacity above which automatic eviction is allowed.
 *
 * Check cache_neg_evict_cond for details.
 */
static u_int ncnegminpct = 3;

static u_int __read_mostly	neg_min; /* the above recomputed against ncsize */
SYSCTL_UINT(_vfs_cache_param, OID_AUTO, negmin, CTLFLAG_RD, &neg_min, 0,
    "Negative entry count above which automatic eviction is allowed");

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash) \
	(&nchashtbl[(hash) & nchash])
static __read_mostly CK_SLIST_HEAD(nchashhead, namecache) *nchashtbl;/* Hash Table */
static u_long __read_mostly	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
    "Size of namecache hash table");
static u_long __exclusive_cache_line	numneg;	/* number of negative entries allocated */
static u_long __exclusive_cache_line	numcache;/* number of cache entries allocated */

struct nchstats	nchstats;		/* cache effectiveness statistics */

static bool __read_frequently cache_fast_revlookup = true;
SYSCTL_BOOL(_vfs, OID_AUTO, cache_fast_revlookup, CTLFLAG_RW,
    &cache_fast_revlookup, 0, "");

static bool __read_mostly cache_rename_add = true;
SYSCTL_BOOL(_vfs, OID_AUTO, cache_rename_add, CTLFLAG_RW,
    &cache_rename_add, 0, "");

static u_int __exclusive_cache_line neg_cycle;

#define ncneghash	3
#define	numneglists	(ncneghash + 1)

struct neglist {
	struct mtx		nl_evict_lock;
	struct mtx		nl_lock __aligned(CACHE_LINE_SIZE);
	TAILQ_HEAD(, namecache) nl_list;
	TAILQ_HEAD(, namecache) nl_hotlist;
	u_long			nl_hotnum;
} __aligned(CACHE_LINE_SIZE);

static struct neglist neglists[numneglists];

static inline struct neglist *
NCP2NEGLIST(struct namecache *ncp)
{

	return (&neglists[(((uintptr_t)(ncp) >> 8) & ncneghash)]);
}

static inline struct negstate *
NCP2NEGSTATE(struct namecache *ncp)
{

	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	return (&ncp->nc_neg);
}

#define	numbucketlocks (ncbuckethash + 1)
static u_int __read_mostly  ncbuckethash;
static struct mtx_padalign __read_mostly  *bucketlocks;
#define	HASH2BUCKETLOCK(hash) \
	((struct mtx *)(&bucketlocks[((hash) & ncbuckethash)]))

#define	numvnodelocks (ncvnodehash + 1)
static u_int __read_mostly  ncvnodehash;
static struct mtx __read_mostly *vnodelocks;
static inline struct mtx *
VP2VNODELOCK(struct vnode *vp)
{

	return (&vnodelocks[(((uintptr_t)(vp) >> 8) & ncvnodehash)]);
}
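
/*
 * Both NCP2NEGLIST and VP2VNODELOCK above pick their list/lock by hashing
 * the object address: the low 8 bits carry little entropy for allocator-
 * provided pointers and get shifted out, the rest is masked down to an
 * index.  Bucket locks are picked analogously, except keyed on the name
 * hash (HASH2BUCKETLOCK).
 */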
entries"); 461 STATNODE_ULONG(count, numcache, "Number of cache entries"); 462 STATNODE_COUNTER(heldvnodes, numcachehv, "Number of namecache entries with vnodes held"); 463 STATNODE_COUNTER(drops, numdrops, "Number of dropped entries due to reaching the limit"); 464 STATNODE_COUNTER(dothits, dothits, "Number of '.' hits"); 465 STATNODE_COUNTER(dotdothis, dotdothits, "Number of '..' hits"); 466 STATNODE_COUNTER(miss, nummiss, "Number of cache misses"); 467 STATNODE_COUNTER(misszap, nummisszap, "Number of cache misses we do not want to cache"); 468 STATNODE_COUNTER(posszaps, numposzaps, 469 "Number of cache hits (positive) we do not want to cache"); 470 STATNODE_COUNTER(poshits, numposhits, "Number of cache hits (positive)"); 471 STATNODE_COUNTER(negzaps, numnegzaps, 472 "Number of cache hits (negative) we do not want to cache"); 473 STATNODE_COUNTER(neghits, numneghits, "Number of cache hits (negative)"); 474 /* These count for vn_getcwd(), too. */ 475 STATNODE_COUNTER(fullpathcalls, numfullpathcalls, "Number of fullpath search calls"); 476 STATNODE_COUNTER(fullpathfail1, numfullpathfail1, "Number of fullpath search errors (ENOTDIR)"); 477 STATNODE_COUNTER(fullpathfail2, numfullpathfail2, 478 "Number of fullpath search errors (VOP_VPTOCNP failures)"); 479 STATNODE_COUNTER(fullpathfail4, numfullpathfail4, "Number of fullpath search errors (ENOMEM)"); 480 STATNODE_COUNTER(fullpathfound, numfullpathfound, "Number of successful fullpath calls"); 481 STATNODE_COUNTER(symlinktoobig, symlinktoobig, "Number of times symlink did not fit the cache"); 482 483 /* 484 * Debug or developer statistics. 485 */ 486 static SYSCTL_NODE(_vfs_cache, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 487 "Name cache debugging"); 488 #define DEBUGNODE_ULONG(name, varname, descr) \ 489 SYSCTL_ULONG(_vfs_cache_debug, OID_AUTO, name, CTLFLAG_RD, &varname, 0, descr); 490 #define DEBUGNODE_COUNTER(name, varname, descr) \ 491 static COUNTER_U64_DEFINE_EARLY(varname); \ 492 SYSCTL_COUNTER_U64(_vfs_cache_debug, OID_AUTO, name, CTLFLAG_RD, &varname, \ 493 descr); 494 DEBUGNODE_COUNTER(zap_bucket_relock_success, zap_bucket_relock_success, 495 "Number of successful removals after relocking"); 496 static long zap_bucket_fail; 497 DEBUGNODE_ULONG(zap_bucket_fail, zap_bucket_fail, ""); 498 static long zap_bucket_fail2; 499 DEBUGNODE_ULONG(zap_bucket_fail2, zap_bucket_fail2, ""); 500 static long cache_lock_vnodes_cel_3_failures; 501 DEBUGNODE_ULONG(vnodes_cel_3_failures, cache_lock_vnodes_cel_3_failures, 502 "Number of times 3-way vnode locking failed"); 503 504 static void cache_zap_locked(struct namecache *ncp); 505 static int vn_fullpath_hardlink(struct nameidata *ndp, char **retbuf, 506 char **freebuf, size_t *buflen); 507 static int vn_fullpath_any_smr(struct vnode *vp, struct vnode *rdir, char *buf, 508 char **retbuf, size_t *buflen, size_t addend); 509 static int vn_fullpath_any(struct vnode *vp, struct vnode *rdir, char *buf, 510 char **retbuf, size_t *buflen); 511 static int vn_fullpath_dir(struct vnode *vp, struct vnode *rdir, char *buf, 512 char **retbuf, size_t *len, size_t addend); 513 514 static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries"); 515 516 static inline void 517 cache_assert_vlp_locked(struct mtx *vlp) 518 { 519 520 if (vlp != NULL) 521 mtx_assert(vlp, MA_OWNED); 522 } 523 524 static inline void 525 cache_assert_vnode_locked(struct vnode *vp) 526 { 527 struct mtx *vlp; 528 529 vlp = VP2VNODELOCK(vp); 530 cache_assert_vlp_locked(vlp); 531 } 532 533 /* 534 * Directory vnodes with entries 

/*
 * Directory vnodes with entries are held for two reasons:
 * 1. make them less of a target for reclamation in vnlru
 * 2. suffer smaller performance penalty in locked lookup as requeuing is avoided
 *
 * It will be feasible to stop doing it altogether if all filesystems start
 * supporting lockless lookup.
 */
static void
cache_hold_vnode(struct vnode *vp)
{

	cache_assert_vnode_locked(vp);
	VNPASS(LIST_EMPTY(&vp->v_cache_src), vp);
	vhold(vp);
	counter_u64_add(numcachehv, 1);
}

static void
cache_drop_vnode(struct vnode *vp)
{

	/*
	 * Called after all locks are dropped, meaning we can't assert
	 * on the state of v_cache_src.
	 */
	vdrop(vp);
	counter_u64_add(numcachehv, -1);
}

/*
 * UMA zones.
 */
static uma_zone_t __read_mostly cache_zone_small;
static uma_zone_t __read_mostly cache_zone_small_ts;
static uma_zone_t __read_mostly cache_zone_large;
static uma_zone_t __read_mostly cache_zone_large_ts;

char *
cache_symlink_alloc(size_t size, int flags)
{

	if (size < CACHE_ZONE_SMALL_SIZE) {
		return (uma_zalloc_smr(cache_zone_small, flags));
	}
	if (size < CACHE_ZONE_LARGE_SIZE) {
		return (uma_zalloc_smr(cache_zone_large, flags));
	}
	counter_u64_add(symlinktoobig, 1);
	SDT_PROBE1(vfs, namecache, symlink, alloc__fail, size);
	return (NULL);
}

void
cache_symlink_free(char *string, size_t size)
{

	MPASS(string != NULL);
	KASSERT(size < CACHE_ZONE_LARGE_SIZE,
	    ("%s: size %zu too big", __func__, size));

	if (size < CACHE_ZONE_SMALL_SIZE) {
		uma_zfree_smr(cache_zone_small, string);
		return;
	}
	if (size < CACHE_ZONE_LARGE_SIZE) {
		uma_zfree_smr(cache_zone_large, string);
		return;
	}
	__assert_unreachable();
}

static struct namecache *
cache_alloc_uma(int len, bool ts)
{
	struct namecache_ts *ncp_ts;
	struct namecache *ncp;

	if (__predict_false(ts)) {
		if (len <= CACHE_PATH_CUTOFF)
			ncp_ts = uma_zalloc_smr(cache_zone_small_ts, M_WAITOK);
		else
			ncp_ts = uma_zalloc_smr(cache_zone_large_ts, M_WAITOK);
		ncp = &ncp_ts->nc_nc;
	} else {
		if (len <= CACHE_PATH_CUTOFF)
			ncp = uma_zalloc_smr(cache_zone_small, M_WAITOK);
		else
			ncp = uma_zalloc_smr(cache_zone_large, M_WAITOK);
	}
	return (ncp);
}

static void
cache_free_uma(struct namecache *ncp)
{
	struct namecache_ts *ncp_ts;

	if (__predict_false(ncp->nc_flag & NCF_TS)) {
		ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
		if (ncp->nc_nlen <= CACHE_PATH_CUTOFF)
			uma_zfree_smr(cache_zone_small_ts, ncp_ts);
		else
			uma_zfree_smr(cache_zone_large_ts, ncp_ts);
	} else {
		if (ncp->nc_nlen <= CACHE_PATH_CUTOFF)
			uma_zfree_smr(cache_zone_small, ncp);
		else
			uma_zfree_smr(cache_zone_large, ncp);
	}
}
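
/*
 * To recap the zone selection above: names of up to CACHE_PATH_CUTOFF
 * characters come from the small zones, longer ones from the large zones,
 * with the _ts variants used when the filesystem provides timestamps
 * (NCF_TS).  Allocation and free must agree on the class, which is why
 * cache_free_uma rederives it from nc_flag and nc_nlen.
 */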

static struct namecache *
cache_alloc(int len, bool ts)
{
	u_long lnumcache;

	/*
	 * Avoid blowout in namecache entries.
	 *
	 * Bugs:
	 * 1. filesystems may end up trying to add an already existing entry
	 * (for example this can happen after a cache miss during concurrent
	 * lookup), in which case we will call cache_neg_evict despite not
	 * adding anything.
	 * 2. the routine may fail to free anything and no provisions are made
	 * to make it try harder (see the inside for failure modes)
	 * 3. it only ever looks at negative entries.
	 */
	lnumcache = atomic_fetchadd_long(&numcache, 1) + 1;
	if (cache_neg_evict_cond(lnumcache)) {
		lnumcache = atomic_load_long(&numcache);
	}
	if (__predict_false(lnumcache >= ncsize)) {
		atomic_subtract_long(&numcache, 1);
		counter_u64_add(numdrops, 1);
		return (NULL);
	}
	return (cache_alloc_uma(len, ts));
}

static void
cache_free(struct namecache *ncp)
{

	MPASS(ncp != NULL);
	if ((ncp->nc_flag & NCF_DVDROP) != 0) {
		cache_drop_vnode(ncp->nc_dvp);
	}
	cache_free_uma(ncp);
	atomic_subtract_long(&numcache, 1);
}

static void
cache_free_batch(struct cache_freebatch *batch)
{
	struct namecache *ncp, *nnp;
	int i;

	i = 0;
	if (TAILQ_EMPTY(batch))
		goto out;
	TAILQ_FOREACH_SAFE(ncp, batch, nc_dst, nnp) {
		if ((ncp->nc_flag & NCF_DVDROP) != 0) {
			cache_drop_vnode(ncp->nc_dvp);
		}
		cache_free_uma(ncp);
		i++;
	}
	atomic_subtract_long(&numcache, i);
out:
	SDT_PROBE1(vfs, namecache, purge, batch, i);
}

/*
 * TODO: With the value stored we can do better than computing the hash based
 * on the address.  The choice of FNV should also be revisited.
 */
static void
cache_prehash(struct vnode *vp)
{

	vp->v_nchash = fnv_32_buf(&vp, sizeof(vp), FNV1_32_INIT);
}

static uint32_t
cache_get_hash(char *name, u_char len, struct vnode *dvp)
{

	return (fnv_32_buf(name, len, dvp->v_nchash));
}

static inline struct nchashhead *
NCP2BUCKET(struct namecache *ncp)
{
	uint32_t hash;

	hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, ncp->nc_dvp);
	return (NCHHASH(hash));
}

static inline struct mtx *
NCP2BUCKETLOCK(struct namecache *ncp)
{
	uint32_t hash;

	hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, ncp->nc_dvp);
	return (HASH2BUCKETLOCK(hash));
}

#ifdef INVARIANTS
static void
cache_assert_bucket_locked(struct namecache *ncp)
{
	struct mtx *blp;

	blp = NCP2BUCKETLOCK(ncp);
	mtx_assert(blp, MA_OWNED);
}

static void
cache_assert_bucket_unlocked(struct namecache *ncp)
{
	struct mtx *blp;

	blp = NCP2BUCKETLOCK(ncp);
	mtx_assert(blp, MA_NOTOWNED);
}
#else
#define cache_assert_bucket_locked(x) do { } while (0)
#define cache_assert_bucket_unlocked(x) do { } while (0)
#endif

#define cache_sort_vnodes(x, y)	_cache_sort_vnodes((void **)(x), (void **)(y))
static void
_cache_sort_vnodes(void **p1, void **p2)
{
	void *tmp;

	MPASS(*p1 != NULL || *p2 != NULL);

	if (*p1 > *p2) {
		tmp = *p2;
		*p2 = *p1;
		*p1 = tmp;
	}
}

static void
cache_lock_all_buckets(void)
{
	u_int i;

	for (i = 0; i < numbucketlocks; i++)
		mtx_lock(&bucketlocks[i]);
}

static void
cache_unlock_all_buckets(void)
{
	u_int i;

	for (i = 0; i < numbucketlocks; i++)
		mtx_unlock(&bucketlocks[i]);
}

static void
cache_lock_all_vnodes(void)
{
	u_int i;

	for (i = 0; i < numvnodelocks; i++)
		mtx_lock(&vnodelocks[i]);
}

static void
cache_unlock_all_vnodes(void)
{
	u_int i;

	for (i = 0; i < numvnodelocks; i++)
		mtx_unlock(&vnodelocks[i]);
}
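
/*
 * Best-effort acquisition of both vnode locks, taken in address order.
 * Returns 0 on success and EAGAIN if any trylock failed, in which case the
 * caller is expected to unwind and redo the locking in blocking fashion
 * (see cache_zap_unlocked_bucket for an example).
 */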
static int
cache_trylock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
{

	cache_sort_vnodes(&vlp1, &vlp2);

	if (vlp1 != NULL) {
		if (!mtx_trylock(vlp1))
			return (EAGAIN);
	}
	if (!mtx_trylock(vlp2)) {
		if (vlp1 != NULL)
			mtx_unlock(vlp1);
		return (EAGAIN);
	}

	return (0);
}

static void
cache_lock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
{

	MPASS(vlp1 != NULL || vlp2 != NULL);
	MPASS(vlp1 <= vlp2);

	if (vlp1 != NULL)
		mtx_lock(vlp1);
	if (vlp2 != NULL)
		mtx_lock(vlp2);
}

static void
cache_unlock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
{

	MPASS(vlp1 != NULL || vlp2 != NULL);

	if (vlp1 != NULL)
		mtx_unlock(vlp1);
	if (vlp2 != NULL)
		mtx_unlock(vlp2);
}

static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct nchstats snap;

	if (req->oldptr == NULL)
		return (SYSCTL_OUT(req, 0, sizeof(snap)));

	snap = nchstats;
	snap.ncs_goodhits = counter_u64_fetch(numposhits);
	snap.ncs_neghits = counter_u64_fetch(numneghits);
	snap.ncs_badhits = counter_u64_fetch(numposzaps) +
	    counter_u64_fetch(numnegzaps);
	snap.ncs_miss = counter_u64_fetch(nummisszap) +
	    counter_u64_fetch(nummiss);

	return (SYSCTL_OUT(req, &snap, sizeof(snap)));
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE | CTLFLAG_RD |
    CTLFLAG_MPSAFE, 0, 0, sysctl_nchstats, "LU",
    "VFS cache effectiveness statistics");

static void
cache_recalc_neg_min(u_int val)
{

	neg_min = (ncsize * val) / 100;
}

static int
sysctl_negminpct(SYSCTL_HANDLER_ARGS)
{
	u_int val;
	int error;

	val = ncnegminpct;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (val == ncnegminpct)
		return (0);
	if (val > 99)
		return (EINVAL);
	ncnegminpct = val;
	cache_recalc_neg_min(val);
	return (0);
}

SYSCTL_PROC(_vfs_cache_param, OID_AUTO, negminpct,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0, sysctl_negminpct,
    "I", "Negative entry % of namecache capacity above which automatic eviction is allowed");

#ifdef DIAGNOSTIC
/*
 * Grab an atomic snapshot of the name cache hash chain lengths
 */
static SYSCTL_NODE(_debug, OID_AUTO, hashstat,
    CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    "hash table stats");

static int
sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int i, error, n_nchash, *cntbuf;

retry:
	n_nchash = nchash + 1;	/* nchash is max index, not count */
	if (req->oldptr == NULL)
		return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));
	cntbuf = malloc(n_nchash * sizeof(int), M_TEMP, M_ZERO | M_WAITOK);
	cache_lock_all_buckets();
	if (n_nchash != nchash + 1) {
		cache_unlock_all_buckets();
		free(cntbuf, M_TEMP);
		goto retry;
	}
	/* Scan hash tables counting entries */
	for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++)
		CK_SLIST_FOREACH(ncp, ncpp, nc_hash)
			cntbuf[i]++;
	cache_unlock_all_buckets();
	for (error = 0, i = 0; i < n_nchash; i++)
		if ((error = SYSCTL_OUT(req, &cntbuf[i], sizeof(int))) != 0)
			break;
	free(cntbuf, M_TEMP);
	return (error);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD|
    CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int",
    "nchash chain lengths");

static int
sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int n_nchash;
	int count, maxlength, used, pct;

	if (!req->oldptr)
		return SYSCTL_OUT(req, 0, 4 * sizeof(int));

	cache_lock_all_buckets();
	n_nchash = nchash + 1;	/* nchash is max index, not count */
	used = 0;
	maxlength = 0;

	/* Scan hash tables for applicable entries */
	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
		count = 0;
		CK_SLIST_FOREACH(ncp, ncpp, nc_hash) {
			count++;
		}
		if (count)
			used++;
		if (maxlength < count)
			maxlength = count;
	}
	n_nchash = nchash + 1;
	cache_unlock_all_buckets();
	pct = (used * 100) / (n_nchash / 100);
	error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &used, sizeof(used));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &pct, sizeof(pct));
	if (error)
		return (error);
	return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD|
    CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I",
    "nchash statistics (number of total/used buckets, maximum chain length, usage percentage)");
#endif

/*
 * Negative entries management
 *
 * Various workloads create plenty of negative entries and barely use them
 * afterwards.  Moreover malicious users can keep performing bogus lookups
 * adding even more entries.  For example "make tinderbox" as of writing this
 * comment ends up with 2.6M namecache entries in total, 1.2M of which are
 * negative.
 *
 * As such, a rather aggressive eviction method is needed.  The currently
 * employed method is a placeholder.
 *
 * Entries are split over numneglists separate lists, each of which is further
 * split into hot and cold entries.  Entries get promoted after getting a hit.
 * Eviction happens on addition of new entry.
 */
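
/*
 * A rough lifecycle sketch of a negative entry under this scheme:
 *
 *	cache_neg_insert	-> tail of the cold list (nl_list)
 *	CACHE_NEG_PROMOTION_THRESH hits -> moved to the hot list (nl_hotlist)
 *	eviction pass		-> demotes one hot entry back to cold, then
 *				   zaps the least-used of a few cold entries
 *
 * Which of the numneglists lists an entry belongs to is derived from its
 * address, see NCP2NEGLIST.
 */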
static SYSCTL_NODE(_vfs_cache, OID_AUTO, neg, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Name cache negative entry statistics");

SYSCTL_ULONG(_vfs_cache_neg, OID_AUTO, count, CTLFLAG_RD, &numneg, 0,
    "Number of negative cache entries");

static COUNTER_U64_DEFINE_EARLY(neg_created);
SYSCTL_COUNTER_U64(_vfs_cache_neg, OID_AUTO, created, CTLFLAG_RD, &neg_created,
    "Number of created negative entries");

static COUNTER_U64_DEFINE_EARLY(neg_evicted);
SYSCTL_COUNTER_U64(_vfs_cache_neg, OID_AUTO, evicted, CTLFLAG_RD, &neg_evicted,
    "Number of evicted negative entries");

static COUNTER_U64_DEFINE_EARLY(neg_evict_skipped_empty);
SYSCTL_COUNTER_U64(_vfs_cache_neg, OID_AUTO, evict_skipped_empty, CTLFLAG_RD,
    &neg_evict_skipped_empty,
    "Number of times evicting failed due to lack of entries");

static COUNTER_U64_DEFINE_EARLY(neg_evict_skipped_missed);
SYSCTL_COUNTER_U64(_vfs_cache_neg, OID_AUTO, evict_skipped_missed, CTLFLAG_RD,
    &neg_evict_skipped_missed,
    "Number of times evicting failed due to target entry disappearing");

static COUNTER_U64_DEFINE_EARLY(neg_evict_skipped_contended);
SYSCTL_COUNTER_U64(_vfs_cache_neg, OID_AUTO, evict_skipped_contended, CTLFLAG_RD,
    &neg_evict_skipped_contended,
    "Number of times evicting failed due to contention");

SYSCTL_COUNTER_U64(_vfs_cache_neg, OID_AUTO, hits, CTLFLAG_RD, &numneghits,
    "Number of cache hits (negative)");

static int
sysctl_neg_hot(SYSCTL_HANDLER_ARGS)
{
	int i, out;

	out = 0;
	for (i = 0; i < numneglists; i++)
		out += neglists[i].nl_hotnum;

	return (SYSCTL_OUT(req, &out, sizeof(out)));
}
SYSCTL_PROC(_vfs_cache_neg, OID_AUTO, hot, CTLTYPE_INT | CTLFLAG_RD |
    CTLFLAG_MPSAFE, 0, 0, sysctl_neg_hot, "I",
    "Number of hot negative entries");

static void
cache_neg_init(struct namecache *ncp)
{
	struct negstate *ns;

	ncp->nc_flag |= NCF_NEGATIVE;
	ns = NCP2NEGSTATE(ncp);
	ns->neg_flag = 0;
	ns->neg_hit = 0;
	counter_u64_add(neg_created, 1);
}

#define CACHE_NEG_PROMOTION_THRESH 2

static bool
cache_neg_hit_prep(struct namecache *ncp)
{
	struct negstate *ns;
	u_char n;

	ns = NCP2NEGSTATE(ncp);
	n = atomic_load_char(&ns->neg_hit);
	for (;;) {
		if (n >= CACHE_NEG_PROMOTION_THRESH)
			return (false);
		if (atomic_fcmpset_8(&ns->neg_hit, &n, n + 1))
			break;
	}
	return (n + 1 == CACHE_NEG_PROMOTION_THRESH);
}

/*
 * Nothing to do here but it is provided for completeness as some
 * cache_neg_hit_prep callers may end up returning without even
 * trying to promote.
 */
#define cache_neg_hit_abort(ncp)	do { } while (0)

static void
cache_neg_hit_finish(struct namecache *ncp)
{

	SDT_PROBE2(vfs, namecache, lookup, hit__negative, ncp->nc_dvp, ncp->nc_name);
	counter_u64_add(numneghits, 1);
}

/*
 * Move a negative entry to the hot list.
 */
static void
cache_neg_promote_locked(struct namecache *ncp)
{
	struct neglist *nl;
	struct negstate *ns;

	ns = NCP2NEGSTATE(ncp);
	nl = NCP2NEGLIST(ncp);
	mtx_assert(&nl->nl_lock, MA_OWNED);
	if ((ns->neg_flag & NEG_HOT) == 0) {
		TAILQ_REMOVE(&nl->nl_list, ncp, nc_dst);
		TAILQ_INSERT_TAIL(&nl->nl_hotlist, ncp, nc_dst);
		nl->nl_hotnum++;
		ns->neg_flag |= NEG_HOT;
	}
}

/*
 * Move a hot negative entry to the cold list.
 */
static void
cache_neg_demote_locked(struct namecache *ncp)
{
	struct neglist *nl;
	struct negstate *ns;

	ns = NCP2NEGSTATE(ncp);
	nl = NCP2NEGLIST(ncp);
	mtx_assert(&nl->nl_lock, MA_OWNED);
	MPASS(ns->neg_flag & NEG_HOT);
	TAILQ_REMOVE(&nl->nl_hotlist, ncp, nc_dst);
	TAILQ_INSERT_TAIL(&nl->nl_list, ncp, nc_dst);
	nl->nl_hotnum--;
	ns->neg_flag &= ~NEG_HOT;
	atomic_store_char(&ns->neg_hit, 0);
}

/*
 * Move a negative entry to the hot list if it matches the lookup.
 *
 * We have to take locks, but they may be contended and in the worst
 * case we may need to go off CPU.  We don't want to spin within the
 * smr section and we can't block with it.  Exiting the section means
 * the found entry could have been evicted.  We are going to look it
 * up again.
 */
static bool
cache_neg_promote_cond(struct vnode *dvp, struct componentname *cnp,
    struct namecache *oncp, uint32_t hash)
{
	struct namecache *ncp;
	struct neglist *nl;
	u_char nc_flag;

	nl = NCP2NEGLIST(oncp);

	mtx_lock(&nl->nl_lock);
	/*
	 * For hash iteration.
	 */
	vfs_smr_enter();

	/*
	 * Avoid all surprises by only succeeding if we got the same entry and
	 * bailing completely otherwise.
	 * XXX There are no provisions to keep the vnode around, meaning we may
	 * end up promoting a negative entry for a *new* vnode and returning
	 * ENOENT on its account.  This is the error we want to return anyway
	 * and promotion is harmless.
	 *
	 * In particular at this point there can be a new ncp which matches the
	 * search but hashes to a different neglist.
	 */
	CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		if (ncp == oncp)
			break;
	}

	/*
	 * No match to begin with.
	 */
	if (__predict_false(ncp == NULL)) {
		goto out_abort;
	}

	/*
	 * The newly found entry may be something different...
	 */
	if (!(ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
	    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))) {
		goto out_abort;
	}

	/*
	 * ... and not even negative.
	 */
	nc_flag = atomic_load_char(&ncp->nc_flag);
	if ((nc_flag & NCF_NEGATIVE) == 0) {
		goto out_abort;
	}

	if (!cache_ncp_canuse(ncp)) {
		goto out_abort;
	}

	cache_neg_promote_locked(ncp);
	cache_neg_hit_finish(ncp);
	vfs_smr_exit();
	mtx_unlock(&nl->nl_lock);
	return (true);
out_abort:
	vfs_smr_exit();
	mtx_unlock(&nl->nl_lock);
	return (false);
}

static void
cache_neg_promote(struct namecache *ncp)
{
	struct neglist *nl;

	nl = NCP2NEGLIST(ncp);
	mtx_lock(&nl->nl_lock);
	cache_neg_promote_locked(ncp);
	mtx_unlock(&nl->nl_lock);
}

static void
cache_neg_insert(struct namecache *ncp)
{
	struct neglist *nl;

	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	cache_assert_bucket_locked(ncp);
	nl = NCP2NEGLIST(ncp);
	mtx_lock(&nl->nl_lock);
	TAILQ_INSERT_TAIL(&nl->nl_list, ncp, nc_dst);
	mtx_unlock(&nl->nl_lock);
	atomic_add_long(&numneg, 1);
}

static void
cache_neg_remove(struct namecache *ncp)
{
	struct neglist *nl;
	struct negstate *ns;

	cache_assert_bucket_locked(ncp);
	nl = NCP2NEGLIST(ncp);
	ns = NCP2NEGSTATE(ncp);
	mtx_lock(&nl->nl_lock);
	if ((ns->neg_flag & NEG_HOT) != 0) {
		TAILQ_REMOVE(&nl->nl_hotlist, ncp, nc_dst);
		nl->nl_hotnum--;
	} else {
		TAILQ_REMOVE(&nl->nl_list, ncp, nc_dst);
	}
	mtx_unlock(&nl->nl_lock);
	atomic_subtract_long(&numneg, 1);
}

static struct neglist *
cache_neg_evict_select_list(void)
{
	struct neglist *nl;
	u_int c;

	c = atomic_fetchadd_int(&neg_cycle, 1) + 1;
	nl = &neglists[c % numneglists];
	if (!mtx_trylock(&nl->nl_evict_lock)) {
		counter_u64_add(neg_evict_skipped_contended, 1);
		return (NULL);
	}
	return (nl);
}
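
/*
 * Pick an entry to evict from the cold list: sample the first few entries
 * and take the one with the fewest recorded hits.  This is a cheap
 * approximation of LRU rather than an exact policy.
 */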
static struct namecache *
cache_neg_evict_select_entry(struct neglist *nl)
{
	struct namecache *ncp, *lncp;
	struct negstate *ns, *lns;
	int i;

	mtx_assert(&nl->nl_evict_lock, MA_OWNED);
	mtx_assert(&nl->nl_lock, MA_OWNED);
	ncp = TAILQ_FIRST(&nl->nl_list);
	if (ncp == NULL)
		return (NULL);
	lncp = ncp;
	lns = NCP2NEGSTATE(lncp);
	for (i = 1; i < 4; i++) {
		ncp = TAILQ_NEXT(ncp, nc_dst);
		if (ncp == NULL)
			break;
		ns = NCP2NEGSTATE(ncp);
		if (ns->neg_hit < lns->neg_hit) {
			lncp = ncp;
			lns = ns;
		}
	}
	return (lncp);
}

static bool
cache_neg_evict(void)
{
	struct namecache *ncp, *ncp2;
	struct neglist *nl;
	struct vnode *dvp;
	struct mtx *dvlp;
	struct mtx *blp;
	uint32_t hash;
	u_char nlen;
	bool evicted;

	nl = cache_neg_evict_select_list();
	if (nl == NULL) {
		return (false);
	}

	mtx_lock(&nl->nl_lock);
	ncp = TAILQ_FIRST(&nl->nl_hotlist);
	if (ncp != NULL) {
		cache_neg_demote_locked(ncp);
	}
	ncp = cache_neg_evict_select_entry(nl);
	if (ncp == NULL) {
		counter_u64_add(neg_evict_skipped_empty, 1);
		mtx_unlock(&nl->nl_lock);
		mtx_unlock(&nl->nl_evict_lock);
		return (false);
	}
	nlen = ncp->nc_nlen;
	dvp = ncp->nc_dvp;
	hash = cache_get_hash(ncp->nc_name, nlen, dvp);
	dvlp = VP2VNODELOCK(dvp);
	blp = HASH2BUCKETLOCK(hash);
	mtx_unlock(&nl->nl_lock);
	mtx_unlock(&nl->nl_evict_lock);
	mtx_lock(dvlp);
	mtx_lock(blp);
	/*
	 * Note that since all locks were dropped above, the entry may be
	 * gone or reallocated to be something else.
	 */
	CK_SLIST_FOREACH(ncp2, (NCHHASH(hash)), nc_hash) {
		if (ncp2 == ncp && ncp2->nc_dvp == dvp &&
		    ncp2->nc_nlen == nlen && (ncp2->nc_flag & NCF_NEGATIVE) != 0)
			break;
	}
	if (ncp2 == NULL) {
		counter_u64_add(neg_evict_skipped_missed, 1);
		ncp = NULL;
		evicted = false;
	} else {
		MPASS(dvlp == VP2VNODELOCK(ncp->nc_dvp));
		MPASS(blp == NCP2BUCKETLOCK(ncp));
		SDT_PROBE2(vfs, namecache, evict_negative, done, ncp->nc_dvp,
		    ncp->nc_name);
		cache_zap_locked(ncp);
		counter_u64_add(neg_evicted, 1);
		evicted = true;
	}
	mtx_unlock(blp);
	mtx_unlock(dvlp);
	if (ncp != NULL)
		cache_free(ncp);
	return (evicted);
}

/*
 * Maybe evict a negative entry to create more room.
 *
 * The ncnegfactor parameter limits what fraction of the total count
 * can comprise of negative entries.  However, if the cache is just
 * warming up this leads to excessive evictions.  As such, ncnegminpct
 * (recomputed to neg_min) dictates whether the above should be
 * applied.
 *
 * Try evicting if the cache is close to full capacity regardless of
 * other considerations.
 */
static bool
cache_neg_evict_cond(u_long lnumcache)
{
	u_long lnumneg;

	if (ncsize - 1000 < lnumcache)
		goto out_evict;
	lnumneg = atomic_load_long(&numneg);
	if (lnumneg < neg_min)
		return (false);
	if (lnumneg * ncnegfactor < lnumcache)
		return (false);
out_evict:
	return (cache_neg_evict());
}

/*
 * cache_zap_locked():
 *
 *   Removes a namecache entry from cache, whether it contains an actual
 *   pointer to a vnode or if it is just a negative cache entry.
 */
static void
cache_zap_locked(struct namecache *ncp)
{
	struct nchashhead *ncpp;

	if (!(ncp->nc_flag & NCF_NEGATIVE))
		cache_assert_vnode_locked(ncp->nc_vp);
	cache_assert_vnode_locked(ncp->nc_dvp);
	cache_assert_bucket_locked(ncp);

	cache_ncp_invalidate(ncp);

	ncpp = NCP2BUCKET(ncp);
	CK_SLIST_REMOVE(ncpp, ncp, namecache, nc_hash);
	if (!(ncp->nc_flag & NCF_NEGATIVE)) {
		SDT_PROBE3(vfs, namecache, zap, done, ncp->nc_dvp,
		    ncp->nc_name, ncp->nc_vp);
		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
		if (ncp == ncp->nc_vp->v_cache_dd) {
			vn_seqc_write_begin_unheld(ncp->nc_vp);
			ncp->nc_vp->v_cache_dd = NULL;
			vn_seqc_write_end(ncp->nc_vp);
		}
	} else {
		SDT_PROBE2(vfs, namecache, zap_negative, done, ncp->nc_dvp,
		    ncp->nc_name);
		cache_neg_remove(ncp);
	}
	if (ncp->nc_flag & NCF_ISDOTDOT) {
		if (ncp == ncp->nc_dvp->v_cache_dd) {
			vn_seqc_write_begin_unheld(ncp->nc_dvp);
			ncp->nc_dvp->v_cache_dd = NULL;
			vn_seqc_write_end(ncp->nc_dvp);
		}
	} else {
		LIST_REMOVE(ncp, nc_src);
		if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
			ncp->nc_flag |= NCF_DVDROP;
		}
	}
}

static void
cache_zap_negative_locked_vnode_kl(struct namecache *ncp, struct vnode *vp)
{
	struct mtx *blp;

	MPASS(ncp->nc_dvp == vp);
	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	cache_assert_vnode_locked(vp);

	blp = NCP2BUCKETLOCK(ncp);
	mtx_lock(blp);
	cache_zap_locked(ncp);
	mtx_unlock(blp);
}

static bool
cache_zap_locked_vnode_kl2(struct namecache *ncp, struct vnode *vp,
    struct mtx **vlpp)
{
	struct mtx *pvlp, *vlp1, *vlp2, *to_unlock;
	struct mtx *blp;

	MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp);
	cache_assert_vnode_locked(vp);

	if (ncp->nc_flag & NCF_NEGATIVE) {
		if (*vlpp != NULL) {
			mtx_unlock(*vlpp);
			*vlpp = NULL;
		}
		cache_zap_negative_locked_vnode_kl(ncp, vp);
		return (true);
	}

	pvlp = VP2VNODELOCK(vp);
	blp = NCP2BUCKETLOCK(ncp);
	vlp1 = VP2VNODELOCK(ncp->nc_dvp);
	vlp2 = VP2VNODELOCK(ncp->nc_vp);

	if (*vlpp == vlp1 || *vlpp == vlp2) {
		to_unlock = *vlpp;
		*vlpp = NULL;
	} else {
		if (*vlpp != NULL) {
			mtx_unlock(*vlpp);
			*vlpp = NULL;
		}
		cache_sort_vnodes(&vlp1, &vlp2);
		if (vlp1 == pvlp) {
			mtx_lock(vlp2);
			to_unlock = vlp2;
		} else {
			if (!mtx_trylock(vlp1))
				goto out_relock;
			to_unlock = vlp1;
		}
	}
	mtx_lock(blp);
	cache_zap_locked(ncp);
	mtx_unlock(blp);
	if (to_unlock != NULL)
		mtx_unlock(to_unlock);
	return (true);

out_relock:
	mtx_unlock(vlp2);
	mtx_lock(vlp1);
	mtx_lock(vlp2);
	MPASS(*vlpp == NULL);
	*vlpp = vlp1;
	return (false);
}

/*
 * If trylocking failed we can get here.  We know enough to take all needed
 * locks in the right order and re-lookup the entry.
 */
static int
cache_zap_unlocked_bucket(struct namecache *ncp, struct componentname *cnp,
    struct vnode *dvp, struct mtx *dvlp, struct mtx *vlp, uint32_t hash,
    struct mtx *blp)
{
	struct namecache *rncp;

	cache_assert_bucket_unlocked(ncp);

	cache_sort_vnodes(&dvlp, &vlp);
	cache_lock_vnodes(dvlp, vlp);
	mtx_lock(blp);
	CK_SLIST_FOREACH(rncp, (NCHHASH(hash)), nc_hash) {
		if (rncp == ncp && rncp->nc_dvp == dvp &&
		    rncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(rncp->nc_name, cnp->cn_nameptr, rncp->nc_nlen))
			break;
	}
	if (rncp != NULL) {
		cache_zap_locked(rncp);
		mtx_unlock(blp);
		cache_unlock_vnodes(dvlp, vlp);
		counter_u64_add(zap_bucket_relock_success, 1);
		return (0);
	}

	mtx_unlock(blp);
	cache_unlock_vnodes(dvlp, vlp);
	return (EAGAIN);
}

static int __noinline
cache_zap_locked_bucket(struct namecache *ncp, struct componentname *cnp,
    uint32_t hash, struct mtx *blp)
{
	struct mtx *dvlp, *vlp;
	struct vnode *dvp;

	cache_assert_bucket_locked(ncp);

	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	vlp = NULL;
	if (!(ncp->nc_flag & NCF_NEGATIVE))
		vlp = VP2VNODELOCK(ncp->nc_vp);
	if (cache_trylock_vnodes(dvlp, vlp) == 0) {
		cache_zap_locked(ncp);
		mtx_unlock(blp);
		cache_unlock_vnodes(dvlp, vlp);
		return (0);
	}

	dvp = ncp->nc_dvp;
	mtx_unlock(blp);
	return (cache_zap_unlocked_bucket(ncp, cnp, dvp, dvlp, vlp, hash, blp));
}
&& cnp->cn_nameptr[1] == '.') { 1607 dvlp = VP2VNODELOCK(dvp); 1608 dvlp2 = NULL; 1609 mtx_lock(dvlp); 1610 retry_dotdot: 1611 ncp = dvp->v_cache_dd; 1612 if (ncp == NULL) { 1613 mtx_unlock(dvlp); 1614 if (dvlp2 != NULL) 1615 mtx_unlock(dvlp2); 1616 SDT_PROBE2(vfs, namecache, removecnp, miss, dvp, cnp); 1617 return (0); 1618 } 1619 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) { 1620 if (!cache_zap_locked_vnode_kl2(ncp, dvp, &dvlp2)) 1621 goto retry_dotdot; 1622 MPASS(dvp->v_cache_dd == NULL); 1623 mtx_unlock(dvlp); 1624 if (dvlp2 != NULL) 1625 mtx_unlock(dvlp2); 1626 cache_free(ncp); 1627 } else { 1628 vn_seqc_write_begin(dvp); 1629 dvp->v_cache_dd = NULL; 1630 vn_seqc_write_end(dvp); 1631 mtx_unlock(dvlp); 1632 if (dvlp2 != NULL) 1633 mtx_unlock(dvlp2); 1634 } 1635 SDT_PROBE2(vfs, namecache, removecnp, hit, dvp, cnp); 1636 return (1); 1637 } 1638 1639 hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp); 1640 blp = HASH2BUCKETLOCK(hash); 1641 retry: 1642 if (CK_SLIST_EMPTY(NCHHASH(hash))) 1643 goto out_no_entry; 1644 1645 mtx_lock(blp); 1646 1647 CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { 1648 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen && 1649 !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen)) 1650 break; 1651 } 1652 1653 if (ncp == NULL) { 1654 mtx_unlock(blp); 1655 goto out_no_entry; 1656 } 1657 1658 error = cache_zap_locked_bucket(ncp, cnp, hash, blp); 1659 if (__predict_false(error != 0)) { 1660 zap_bucket_fail++; 1661 goto retry; 1662 } 1663 counter_u64_add(numposzaps, 1); 1664 SDT_PROBE2(vfs, namecache, removecnp, hit, dvp, cnp); 1665 cache_free(ncp); 1666 return (1); 1667 out_no_entry: 1668 counter_u64_add(nummisszap, 1); 1669 SDT_PROBE2(vfs, namecache, removecnp, miss, dvp, cnp); 1670 return (0); 1671 } 1672 1673 static int __noinline 1674 cache_lookup_dot(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, 1675 struct timespec *tsp, int *ticksp) 1676 { 1677 int ltype; 1678 1679 *vpp = dvp; 1680 counter_u64_add(dothits, 1); 1681 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", *vpp); 1682 if (tsp != NULL) 1683 timespecclear(tsp); 1684 if (ticksp != NULL) 1685 *ticksp = ticks; 1686 vrefact(*vpp); 1687 /* 1688 * When we lookup "." we still can be asked to lock it 1689 * differently... 

static int __noinline
cache_lookup_dotdot(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
    struct timespec *tsp, int *ticksp)
{
	struct namecache_ts *ncp_ts;
	struct namecache *ncp;
	struct mtx *dvlp;
	enum vgetstate vs;
	int error, ltype;
	bool whiteout;

	MPASS((cnp->cn_flags & ISDOTDOT) != 0);

	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		cache_remove_cnp(dvp, cnp);
		return (0);
	}

	counter_u64_add(dotdothits, 1);
retry:
	dvlp = VP2VNODELOCK(dvp);
	mtx_lock(dvlp);
	ncp = dvp->v_cache_dd;
	if (ncp == NULL) {
		SDT_PROBE3(vfs, namecache, lookup, miss, dvp, "..", NULL);
		mtx_unlock(dvlp);
		return (0);
	}
	if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
		if (ncp->nc_flag & NCF_NEGATIVE)
			*vpp = NULL;
		else
			*vpp = ncp->nc_vp;
	} else
		*vpp = ncp->nc_dvp;
	if (*vpp == NULL)
		goto negative_success;
	SDT_PROBE3(vfs, namecache, lookup, hit, dvp, "..", *vpp);
	cache_out_ts(ncp, tsp, ticksp);
	if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) ==
	    NCF_DTS && tsp != NULL) {
		ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
		*tsp = ncp_ts->nc_dotdottime;
	}

	MPASS(dvp != *vpp);
	ltype = VOP_ISLOCKED(dvp);
	VOP_UNLOCK(dvp);
	vs = vget_prep(*vpp);
	mtx_unlock(dvlp);
	error = vget_finish(*vpp, cnp->cn_lkflags, vs);
	vn_lock(dvp, ltype | LK_RETRY);
	if (VN_IS_DOOMED(dvp)) {
		if (error == 0)
			vput(*vpp);
		*vpp = NULL;
		return (ENOENT);
	}
	if (error) {
		*vpp = NULL;
		goto retry;
	}
	return (-1);
negative_success:
	if (__predict_false(cnp->cn_nameiop == CREATE)) {
		if (cnp->cn_flags & ISLASTCN) {
			counter_u64_add(numnegzaps, 1);
			cache_zap_negative_locked_vnode_kl(ncp, dvp);
			mtx_unlock(dvlp);
			cache_free(ncp);
			return (0);
		}
	}

	whiteout = (ncp->nc_flag & NCF_WHITE);
	cache_out_ts(ncp, tsp, ticksp);
	if (cache_neg_hit_prep(ncp))
		cache_neg_promote(ncp);
	else
		cache_neg_hit_finish(ncp);
	mtx_unlock(dvlp);
	if (whiteout)
		cnp->cn_flags |= ISWHITEOUT;
	return (ENOENT);
}

/**
 * Lookup a name in the name cache
 *
 * # Arguments
 *
 * - dvp:	Parent directory in which to search.
 * - vpp:	Return argument.  Will contain desired vnode on cache hit.
 * - cnp:	Parameters of the name search.  The most interesting bits of
 *		the cn_flags field have the following meanings:
 *	- MAKEENTRY:	If clear, free an entry from the cache rather than look
 *			it up.
 *	- ISDOTDOT:	Must be set if and only if cn_nameptr == ".."
 * - tsp:	Return storage for cache timestamp.  On a successful (positive
 *		or negative) lookup, tsp will be filled with any timespec that
 *		was stored when this cache entry was created.  However, it will
 *		be clear for "." entries.
 * - ticks:	Return storage for alternate cache timestamp.  On a successful
 *		(positive or negative) lookup, it will contain the ticks value
 *		that was current when the cache entry was created, unless cnp
 *		was ".".
 *
 * Either both tsp and ticks have to be provided or neither of them.
 *
 * # Returns
 *
 * - -1:	A positive cache hit.  vpp will contain the desired vnode.
 * - ENOENT:	A negative cache hit, or dvp was recycled out from under us due
 *		to a forced unmount.  vpp will not be modified.  If the entry
 *		is a whiteout, then the ISWHITEOUT flag will be set in
 *		cnp->cn_flags.
 * - 0:		A cache miss.  vpp will not be modified.
 *
 * # Locking
 *
 * On a cache hit, vpp will be returned locked and ref'd.  If we're looking up
 * .., dvp is unlocked.  If we're looking up . an extra ref is taken, but the
 * lock is not recursively acquired.
 */
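
/*
 * For illustration, a typical caller consumes the above contract roughly
 * as follows (sketch only, surrounding logic elided):
 *
 *	error = cache_lookup(dvp, vpp, cnp, NULL, NULL);
 *	if (error == -1)
 *		return (0);	(positive hit, *vpp locked and referenced)
 *	if (error == ENOENT)
 *		return (error);	(negative hit, possibly with ISWHITEOUT set)
 *	(error == 0: miss, fall back to scanning the directory)
 */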
static int __noinline
cache_lookup_fallback(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
    struct timespec *tsp, int *ticksp)
{
	struct namecache *ncp;
	struct mtx *blp;
	uint32_t hash;
	enum vgetstate vs;
	int error;
	bool whiteout;

	MPASS((cnp->cn_flags & ISDOTDOT) == 0);
	MPASS((cnp->cn_flags & (MAKEENTRY | NC_KEEPPOSENTRY)) != 0);

retry:
	hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
	blp = HASH2BUCKETLOCK(hash);
	mtx_lock(blp);

	CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	if (__predict_false(ncp == NULL)) {
		mtx_unlock(blp);
		SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr,
		    NULL);
		counter_u64_add(nummiss, 1);
		return (0);
	}

	if (ncp->nc_flag & NCF_NEGATIVE)
		goto negative_success;

	counter_u64_add(numposhits, 1);
	*vpp = ncp->nc_vp;
	SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name, *vpp);
	cache_out_ts(ncp, tsp, ticksp);
	MPASS(dvp != *vpp);
	vs = vget_prep(*vpp);
	mtx_unlock(blp);
	error = vget_finish(*vpp, cnp->cn_lkflags, vs);
	if (error) {
		*vpp = NULL;
		goto retry;
	}
	return (-1);
negative_success:
	/*
	 * We don't get here with regular lookup apart from corner cases.
	 */
	if (__predict_true(cnp->cn_nameiop == CREATE)) {
		if (cnp->cn_flags & ISLASTCN) {
			counter_u64_add(numnegzaps, 1);
			error = cache_zap_locked_bucket(ncp, cnp, hash, blp);
			if (__predict_false(error != 0)) {
				zap_bucket_fail2++;
				goto retry;
			}
			cache_free(ncp);
			return (0);
		}
	}

	whiteout = (ncp->nc_flag & NCF_WHITE);
	cache_out_ts(ncp, tsp, ticksp);
	if (cache_neg_hit_prep(ncp))
		cache_neg_promote(ncp);
	else
		cache_neg_hit_finish(ncp);
	mtx_unlock(blp);
	if (whiteout)
		cnp->cn_flags |= ISWHITEOUT;
	return (ENOENT);
}
1883 */ 1884 if (__predict_true(cnp->cn_nameiop == CREATE)) { 1885 if (cnp->cn_flags & ISLASTCN) { 1886 counter_u64_add(numnegzaps, 1); 1887 error = cache_zap_locked_bucket(ncp, cnp, hash, blp); 1888 if (__predict_false(error != 0)) { 1889 zap_bucket_fail2++; 1890 goto retry; 1891 } 1892 cache_free(ncp); 1893 return (0); 1894 } 1895 } 1896 1897 whiteout = (ncp->nc_flag & NCF_WHITE); 1898 cache_out_ts(ncp, tsp, ticksp); 1899 if (cache_neg_hit_prep(ncp)) 1900 cache_neg_promote(ncp); 1901 else 1902 cache_neg_hit_finish(ncp); 1903 mtx_unlock(blp); 1904 if (whiteout) 1905 cnp->cn_flags |= ISWHITEOUT; 1906 return (ENOENT); 1907 } 1908 1909 int 1910 cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, 1911 struct timespec *tsp, int *ticksp) 1912 { 1913 struct namecache *ncp; 1914 uint32_t hash; 1915 enum vgetstate vs; 1916 int error; 1917 bool whiteout, neg_promote; 1918 u_short nc_flag; 1919 1920 MPASS((tsp == NULL && ticksp == NULL) || (tsp != NULL && ticksp != NULL)); 1921 1922 #ifdef DEBUG_CACHE 1923 if (__predict_false(!doingcache)) { 1924 cnp->cn_flags &= ~MAKEENTRY; 1925 return (0); 1926 } 1927 #endif 1928 1929 if (__predict_false(cnp->cn_nameptr[0] == '.')) { 1930 if (cnp->cn_namelen == 1) 1931 return (cache_lookup_dot(dvp, vpp, cnp, tsp, ticksp)); 1932 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') 1933 return (cache_lookup_dotdot(dvp, vpp, cnp, tsp, ticksp)); 1934 } 1935 1936 MPASS((cnp->cn_flags & ISDOTDOT) == 0); 1937 1938 if ((cnp->cn_flags & (MAKEENTRY | NC_KEEPPOSENTRY)) == 0) { 1939 cache_remove_cnp(dvp, cnp); 1940 return (0); 1941 } 1942 1943 hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp); 1944 vfs_smr_enter(); 1945 1946 CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { 1947 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen && 1948 !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen)) 1949 break; 1950 } 1951 1952 if (__predict_false(ncp == NULL)) { 1953 vfs_smr_exit(); 1954 SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr, 1955 NULL); 1956 counter_u64_add(nummiss, 1); 1957 return (0); 1958 } 1959 1960 nc_flag = atomic_load_char(&ncp->nc_flag); 1961 if (nc_flag & NCF_NEGATIVE) 1962 goto negative_success; 1963 1964 counter_u64_add(numposhits, 1); 1965 *vpp = ncp->nc_vp; 1966 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name, *vpp); 1967 cache_out_ts(ncp, tsp, ticksp); 1968 MPASS(dvp != *vpp); 1969 if (!cache_ncp_canuse(ncp)) { 1970 vfs_smr_exit(); 1971 *vpp = NULL; 1972 goto out_fallback; 1973 } 1974 vs = vget_prep_smr(*vpp); 1975 vfs_smr_exit(); 1976 if (__predict_false(vs == VGET_NONE)) { 1977 *vpp = NULL; 1978 goto out_fallback; 1979 } 1980 error = vget_finish(*vpp, cnp->cn_lkflags, vs); 1981 if (error) { 1982 *vpp = NULL; 1983 goto out_fallback; 1984 } 1985 return (-1); 1986 negative_success: 1987 if (cnp->cn_nameiop == CREATE) { 1988 if (cnp->cn_flags & ISLASTCN) { 1989 vfs_smr_exit(); 1990 goto out_fallback; 1991 } 1992 } 1993 1994 cache_out_ts(ncp, tsp, ticksp); 1995 whiteout = (ncp->nc_flag & NCF_WHITE); 1996 neg_promote = cache_neg_hit_prep(ncp); 1997 if (!cache_ncp_canuse(ncp)) { 1998 cache_neg_hit_abort(ncp); 1999 vfs_smr_exit(); 2000 goto out_fallback; 2001 } 2002 if (neg_promote) { 2003 vfs_smr_exit(); 2004 if (!cache_neg_promote_cond(dvp, cnp, ncp, hash)) 2005 goto out_fallback; 2006 } else { 2007 cache_neg_hit_finish(ncp); 2008 vfs_smr_exit(); 2009 } 2010 if (whiteout) 2011 cnp->cn_flags |= ISWHITEOUT; 2012 return (ENOENT); 2013 out_fallback: 2014 return (cache_lookup_fallback(dvp, vpp, cnp, 
tsp, ticksp)); 2015 } 2016 2017 struct celockstate { 2018 struct mtx *vlp[3]; 2019 struct mtx *blp[2]; 2020 }; 2021 CTASSERT((nitems(((struct celockstate *)0)->vlp) == 3)); 2022 CTASSERT((nitems(((struct celockstate *)0)->blp) == 2)); 2023 2024 static inline void 2025 cache_celockstate_init(struct celockstate *cel) 2026 { 2027 2028 bzero(cel, sizeof(*cel)); 2029 } 2030 2031 static void 2032 cache_lock_vnodes_cel(struct celockstate *cel, struct vnode *vp, 2033 struct vnode *dvp) 2034 { 2035 struct mtx *vlp1, *vlp2; 2036 2037 MPASS(cel->vlp[0] == NULL); 2038 MPASS(cel->vlp[1] == NULL); 2039 MPASS(cel->vlp[2] == NULL); 2040 2041 MPASS(vp != NULL || dvp != NULL); 2042 2043 vlp1 = VP2VNODELOCK(vp); 2044 vlp2 = VP2VNODELOCK(dvp); 2045 cache_sort_vnodes(&vlp1, &vlp2); 2046 2047 if (vlp1 != NULL) { 2048 mtx_lock(vlp1); 2049 cel->vlp[0] = vlp1; 2050 } 2051 mtx_lock(vlp2); 2052 cel->vlp[1] = vlp2; 2053 } 2054 2055 static void 2056 cache_unlock_vnodes_cel(struct celockstate *cel) 2057 { 2058 2059 MPASS(cel->vlp[0] != NULL || cel->vlp[1] != NULL); 2060 2061 if (cel->vlp[0] != NULL) 2062 mtx_unlock(cel->vlp[0]); 2063 if (cel->vlp[1] != NULL) 2064 mtx_unlock(cel->vlp[1]); 2065 if (cel->vlp[2] != NULL) 2066 mtx_unlock(cel->vlp[2]); 2067 } 2068 2069 static bool 2070 cache_lock_vnodes_cel_3(struct celockstate *cel, struct vnode *vp) 2071 { 2072 struct mtx *vlp; 2073 bool ret; 2074 2075 cache_assert_vlp_locked(cel->vlp[0]); 2076 cache_assert_vlp_locked(cel->vlp[1]); 2077 MPASS(cel->vlp[2] == NULL); 2078 2079 MPASS(vp != NULL); 2080 vlp = VP2VNODELOCK(vp); 2081 2082 ret = true; 2083 if (vlp >= cel->vlp[1]) { 2084 mtx_lock(vlp); 2085 } else { 2086 if (mtx_trylock(vlp)) 2087 goto out; 2088 cache_lock_vnodes_cel_3_failures++; 2089 cache_unlock_vnodes_cel(cel); 2090 if (vlp < cel->vlp[0]) { 2091 mtx_lock(vlp); 2092 mtx_lock(cel->vlp[0]); 2093 mtx_lock(cel->vlp[1]); 2094 } else { 2095 if (cel->vlp[0] != NULL) 2096 mtx_lock(cel->vlp[0]); 2097 mtx_lock(vlp); 2098 mtx_lock(cel->vlp[1]); 2099 } 2100 ret = false; 2101 } 2102 out: 2103 cel->vlp[2] = vlp; 2104 return (ret); 2105 } 2106 2107 static void 2108 cache_lock_buckets_cel(struct celockstate *cel, struct mtx *blp1, 2109 struct mtx *blp2) 2110 { 2111 2112 MPASS(cel->blp[0] == NULL); 2113 MPASS(cel->blp[1] == NULL); 2114 2115 cache_sort_vnodes(&blp1, &blp2); 2116 2117 if (blp1 != NULL) { 2118 mtx_lock(blp1); 2119 cel->blp[0] = blp1; 2120 } 2121 mtx_lock(blp2); 2122 cel->blp[1] = blp2; 2123 } 2124 2125 static void 2126 cache_unlock_buckets_cel(struct celockstate *cel) 2127 { 2128 2129 if (cel->blp[0] != NULL) 2130 mtx_unlock(cel->blp[0]); 2131 mtx_unlock(cel->blp[1]); 2132 } 2133 2134 /* 2135 * Lock part of the cache affected by the insertion. 2136 * 2137 * This means vnodelocks for dvp, vp and the relevant bucketlock. 2138 * However, insertion can result in removal of an old entry. In this 2139 * case we have an additional vnode and bucketlock pair to lock. 2140 * 2141 * That is, in the worst case we have to lock 3 vnodes and 2 bucketlocks, while 2142 * preserving the locking order (smaller address first). 
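 *
 * A sketch of the resulting order (illustrative only, not actual code):
 *
 *	vlp1 = VP2VNODELOCK(vp);
 *	vlp2 = VP2VNODELOCK(dvp);
 *	cache_sort_vnodes(&vlp1, &vlp2);	(smaller address first)
 *	mtx_lock(vlp1);				(skipped if NULL)
 *	mtx_lock(vlp2);
 *
 * If the third vnode lock (needed for an old ".." entry) sorts below one
 * already held, cache_lock_vnodes_cel_3() trylocks it and on failure
 * drops everything and reacquires in address order.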
2143 */ 2144 static void 2145 cache_enter_lock(struct celockstate *cel, struct vnode *dvp, struct vnode *vp, 2146 uint32_t hash) 2147 { 2148 struct namecache *ncp; 2149 struct mtx *blps[2]; 2150 2151 blps[0] = HASH2BUCKETLOCK(hash); 2152 for (;;) { 2153 blps[1] = NULL; 2154 cache_lock_vnodes_cel(cel, dvp, vp); 2155 if (vp == NULL || vp->v_type != VDIR) 2156 break; 2157 ncp = vp->v_cache_dd; 2158 if (ncp == NULL) 2159 break; 2160 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 2161 break; 2162 MPASS(ncp->nc_dvp == vp); 2163 blps[1] = NCP2BUCKETLOCK(ncp); 2164 if (ncp->nc_flag & NCF_NEGATIVE) 2165 break; 2166 if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp)) 2167 break; 2168 /* 2169 * All vnodes got re-locked. Re-validate the state and if 2170 * nothing changed we are done. Otherwise restart. 2171 */ 2172 if (ncp == vp->v_cache_dd && 2173 (ncp->nc_flag & NCF_ISDOTDOT) != 0 && 2174 blps[1] == NCP2BUCKETLOCK(ncp) && 2175 VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2]) 2176 break; 2177 cache_unlock_vnodes_cel(cel); 2178 cel->vlp[0] = NULL; 2179 cel->vlp[1] = NULL; 2180 cel->vlp[2] = NULL; 2181 } 2182 cache_lock_buckets_cel(cel, blps[0], blps[1]); 2183 } 2184 2185 static void 2186 cache_enter_lock_dd(struct celockstate *cel, struct vnode *dvp, struct vnode *vp, 2187 uint32_t hash) 2188 { 2189 struct namecache *ncp; 2190 struct mtx *blps[2]; 2191 2192 blps[0] = HASH2BUCKETLOCK(hash); 2193 for (;;) { 2194 blps[1] = NULL; 2195 cache_lock_vnodes_cel(cel, dvp, vp); 2196 ncp = dvp->v_cache_dd; 2197 if (ncp == NULL) 2198 break; 2199 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 2200 break; 2201 MPASS(ncp->nc_dvp == dvp); 2202 blps[1] = NCP2BUCKETLOCK(ncp); 2203 if (ncp->nc_flag & NCF_NEGATIVE) 2204 break; 2205 if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp)) 2206 break; 2207 if (ncp == dvp->v_cache_dd && 2208 (ncp->nc_flag & NCF_ISDOTDOT) != 0 && 2209 blps[1] == NCP2BUCKETLOCK(ncp) && 2210 VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2]) 2211 break; 2212 cache_unlock_vnodes_cel(cel); 2213 cel->vlp[0] = NULL; 2214 cel->vlp[1] = NULL; 2215 cel->vlp[2] = NULL; 2216 } 2217 cache_lock_buckets_cel(cel, blps[0], blps[1]); 2218 } 2219 2220 static void 2221 cache_enter_unlock(struct celockstate *cel) 2222 { 2223 2224 cache_unlock_buckets_cel(cel); 2225 cache_unlock_vnodes_cel(cel); 2226 } 2227 2228 static void __noinline 2229 cache_enter_dotdot_prep(struct vnode *dvp, struct vnode *vp, 2230 struct componentname *cnp) 2231 { 2232 struct celockstate cel; 2233 struct namecache *ncp; 2234 uint32_t hash; 2235 int len; 2236 2237 if (dvp->v_cache_dd == NULL) 2238 return; 2239 len = cnp->cn_namelen; 2240 cache_celockstate_init(&cel); 2241 hash = cache_get_hash(cnp->cn_nameptr, len, dvp); 2242 cache_enter_lock_dd(&cel, dvp, vp, hash); 2243 vn_seqc_write_begin(dvp); 2244 ncp = dvp->v_cache_dd; 2245 if (ncp != NULL && (ncp->nc_flag & NCF_ISDOTDOT)) { 2246 KASSERT(ncp->nc_dvp == dvp, ("wrong isdotdot parent")); 2247 cache_zap_locked(ncp); 2248 } else { 2249 ncp = NULL; 2250 } 2251 dvp->v_cache_dd = NULL; 2252 vn_seqc_write_end(dvp); 2253 cache_enter_unlock(&cel); 2254 if (ncp != NULL) 2255 cache_free(ncp); 2256 } 2257 2258 /* 2259 * Add an entry to the cache. 
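 *
 * A NULL vp turns the entry into a negative one, recording a failed
 * lookup. tsp and dtsp, when given, are saved for later retrieval by
 * cache_lookup().
 *
 * Usage sketch (a hypothetical filesystem caching a lookup result;
 * cache_enter() is the timestamp-less wrapper around this function):
 *
 *	if (cnp->cn_flags & MAKEENTRY)
 *		cache_enter(dvp, *vpp, cnp);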
2260 */ 2261 void 2262 cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, 2263 struct timespec *tsp, struct timespec *dtsp) 2264 { 2265 struct celockstate cel; 2266 struct namecache *ncp, *n2, *ndd; 2267 struct namecache_ts *ncp_ts; 2268 struct nchashhead *ncpp; 2269 uint32_t hash; 2270 int flag; 2271 int len; 2272 2273 KASSERT(cnp->cn_namelen <= NAME_MAX, 2274 ("%s: passed len %ld exceeds NAME_MAX (%d)", __func__, cnp->cn_namelen, 2275 NAME_MAX)); 2276 VNPASS(dvp != vp, dvp); 2277 VNPASS(!VN_IS_DOOMED(dvp), dvp); 2278 VNPASS(dvp->v_type != VNON, dvp); 2279 if (vp != NULL) { 2280 VNPASS(!VN_IS_DOOMED(vp), vp); 2281 VNPASS(vp->v_type != VNON, vp); 2282 } 2283 2284 #ifdef DEBUG_CACHE 2285 if (__predict_false(!doingcache)) 2286 return; 2287 #endif 2288 2289 flag = 0; 2290 if (__predict_false(cnp->cn_nameptr[0] == '.')) { 2291 if (cnp->cn_namelen == 1) 2292 return; 2293 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') { 2294 cache_enter_dotdot_prep(dvp, vp, cnp); 2295 flag = NCF_ISDOTDOT; 2296 } 2297 } 2298 2299 ncp = cache_alloc(cnp->cn_namelen, tsp != NULL); 2300 if (ncp == NULL) 2301 return; 2302 2303 cache_celockstate_init(&cel); 2304 ndd = NULL; 2305 ncp_ts = NULL; 2306 2307 /* 2308 * Calculate the hash key and setup as much of the new 2309 * namecache entry as possible before acquiring the lock. 2310 */ 2311 ncp->nc_flag = flag | NCF_WIP; 2312 ncp->nc_vp = vp; 2313 if (vp == NULL) 2314 cache_neg_init(ncp); 2315 ncp->nc_dvp = dvp; 2316 if (tsp != NULL) { 2317 ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc); 2318 ncp_ts->nc_time = *tsp; 2319 ncp_ts->nc_ticks = ticks; 2320 ncp_ts->nc_nc.nc_flag |= NCF_TS; 2321 if (dtsp != NULL) { 2322 ncp_ts->nc_dotdottime = *dtsp; 2323 ncp_ts->nc_nc.nc_flag |= NCF_DTS; 2324 } 2325 } 2326 len = ncp->nc_nlen = cnp->cn_namelen; 2327 hash = cache_get_hash(cnp->cn_nameptr, len, dvp); 2328 memcpy(ncp->nc_name, cnp->cn_nameptr, len); 2329 ncp->nc_name[len] = '\0'; 2330 cache_enter_lock(&cel, dvp, vp, hash); 2331 2332 /* 2333 * See if this vnode or negative entry is already in the cache 2334 * with this name. This can happen with concurrent lookups of 2335 * the same path name. 2336 */ 2337 ncpp = NCHHASH(hash); 2338 CK_SLIST_FOREACH(n2, ncpp, nc_hash) { 2339 if (n2->nc_dvp == dvp && 2340 n2->nc_nlen == cnp->cn_namelen && 2341 !bcmp(n2->nc_name, cnp->cn_nameptr, n2->nc_nlen)) { 2342 MPASS(cache_ncp_canuse(n2)); 2343 if ((n2->nc_flag & NCF_NEGATIVE) != 0) 2344 KASSERT(vp == NULL, 2345 ("%s: found entry pointing to a different vnode (%p != %p)", 2346 __func__, NULL, vp)); 2347 else 2348 KASSERT(n2->nc_vp == vp, 2349 ("%s: found entry pointing to a different vnode (%p != %p)", 2350 __func__, n2->nc_vp, vp)); 2351 /* 2352 * Entries are supposed to be immutable unless in the 2353 * process of getting destroyed. Accommodating for 2354 * changing timestamps is possible but not worth it. 2355 * This should be harmless in terms of correctness, in 2356 * the worst case resulting in an earlier expiration. 2357 * Alternatively, the found entry can be replaced 2358 * altogether. 
2359 */ 2360 MPASS((n2->nc_flag & (NCF_TS | NCF_DTS)) == (ncp->nc_flag & (NCF_TS | NCF_DTS))); 2361 #if 0 2362 if (tsp != NULL) { 2363 KASSERT((n2->nc_flag & NCF_TS) != 0, 2364 ("no NCF_TS")); 2365 n2_ts = __containerof(n2, struct namecache_ts, nc_nc); 2366 n2_ts->nc_time = ncp_ts->nc_time; 2367 n2_ts->nc_ticks = ncp_ts->nc_ticks; 2368 if (dtsp != NULL) { 2369 n2_ts->nc_dotdottime = ncp_ts->nc_dotdottime; 2370 n2_ts->nc_nc.nc_flag |= NCF_DTS; 2371 } 2372 } 2373 #endif 2374 SDT_PROBE3(vfs, namecache, enter, duplicate, dvp, ncp->nc_name, 2375 vp); 2376 goto out_unlock_free; 2377 } 2378 } 2379 2380 if (flag == NCF_ISDOTDOT) { 2381 /* 2382 * See if we are trying to add .. entry, but some other lookup 2383 * has populated v_cache_dd pointer already. 2384 */ 2385 if (dvp->v_cache_dd != NULL) 2386 goto out_unlock_free; 2387 KASSERT(vp == NULL || vp->v_type == VDIR, 2388 ("wrong vnode type %p", vp)); 2389 vn_seqc_write_begin(dvp); 2390 dvp->v_cache_dd = ncp; 2391 vn_seqc_write_end(dvp); 2392 } 2393 2394 if (vp != NULL) { 2395 if (flag != NCF_ISDOTDOT) { 2396 /* 2397 * For this case, the cache entry maps both the 2398 * directory name in it and the name ".." for the 2399 * directory's parent. 2400 */ 2401 vn_seqc_write_begin(vp); 2402 if ((ndd = vp->v_cache_dd) != NULL) { 2403 if ((ndd->nc_flag & NCF_ISDOTDOT) != 0) 2404 cache_zap_locked(ndd); 2405 else 2406 ndd = NULL; 2407 } 2408 vp->v_cache_dd = ncp; 2409 vn_seqc_write_end(vp); 2410 } else if (vp->v_type != VDIR) { 2411 if (vp->v_cache_dd != NULL) { 2412 vn_seqc_write_begin(vp); 2413 vp->v_cache_dd = NULL; 2414 vn_seqc_write_end(vp); 2415 } 2416 } 2417 } 2418 2419 if (flag != NCF_ISDOTDOT) { 2420 if (LIST_EMPTY(&dvp->v_cache_src)) { 2421 cache_hold_vnode(dvp); 2422 } 2423 LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src); 2424 } 2425 2426 /* 2427 * If the entry is "negative", we place it into the 2428 * "negative" cache queue, otherwise, we place it into the 2429 * destination vnode's cache entries queue. 2430 */ 2431 if (vp != NULL) { 2432 TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst); 2433 SDT_PROBE3(vfs, namecache, enter, done, dvp, ncp->nc_name, 2434 vp); 2435 } else { 2436 if (cnp->cn_flags & ISWHITEOUT) 2437 ncp->nc_flag |= NCF_WHITE; 2438 cache_neg_insert(ncp); 2439 SDT_PROBE2(vfs, namecache, enter_negative, done, dvp, 2440 ncp->nc_name); 2441 } 2442 2443 /* 2444 * Insert the new namecache entry into the appropriate chain 2445 * within the cache entries table. 2446 */ 2447 CK_SLIST_INSERT_HEAD(ncpp, ncp, nc_hash); 2448 2449 atomic_thread_fence_rel(); 2450 /* 2451 * Mark the entry as fully constructed. 2452 * It is immutable past this point until its removal. 
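 * The release fence above pairs with the flag re-check in
 * cache_ncp_canuse(), so a lockless lookup which observes NCF_WIP
 * cleared also observes the fully constructed entry.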
2453 */ 2454 atomic_store_char(&ncp->nc_flag, ncp->nc_flag & ~NCF_WIP); 2455 2456 cache_enter_unlock(&cel); 2457 if (ndd != NULL) 2458 cache_free(ndd); 2459 return; 2460 out_unlock_free: 2461 cache_enter_unlock(&cel); 2462 cache_free(ncp); 2463 return; 2464 } 2465 2466 static u_int 2467 cache_roundup_2(u_int val) 2468 { 2469 u_int res; 2470 2471 for (res = 1; res <= val; res <<= 1) 2472 continue; 2473 2474 return (res); 2475 } 2476 2477 static struct nchashhead * 2478 nchinittbl(u_long elements, u_long *hashmask) 2479 { 2480 struct nchashhead *hashtbl; 2481 u_long hashsize, i; 2482 2483 hashsize = cache_roundup_2(elements) / 2; 2484 2485 hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), M_VFSCACHE, M_WAITOK); 2486 for (i = 0; i < hashsize; i++) 2487 CK_SLIST_INIT(&hashtbl[i]); 2488 *hashmask = hashsize - 1; 2489 return (hashtbl); 2490 } 2491 2492 static void 2493 ncfreetbl(struct nchashhead *hashtbl) 2494 { 2495 2496 free(hashtbl, M_VFSCACHE); 2497 } 2498 2499 /* 2500 * Name cache initialization, from vfs_init() when we are booting 2501 */ 2502 static void 2503 nchinit(void *dummy __unused) 2504 { 2505 u_int i; 2506 2507 cache_zone_small = uma_zcreate("S VFS Cache", CACHE_ZONE_SMALL_SIZE, 2508 NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT); 2509 cache_zone_small_ts = uma_zcreate("STS VFS Cache", CACHE_ZONE_SMALL_TS_SIZE, 2510 NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT); 2511 cache_zone_large = uma_zcreate("L VFS Cache", CACHE_ZONE_LARGE_SIZE, 2512 NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT); 2513 cache_zone_large_ts = uma_zcreate("LTS VFS Cache", CACHE_ZONE_LARGE_TS_SIZE, 2514 NULL, NULL, NULL, NULL, CACHE_ZONE_ALIGNMENT, UMA_ZONE_ZINIT); 2515 2516 VFS_SMR_ZONE_SET(cache_zone_small); 2517 VFS_SMR_ZONE_SET(cache_zone_small_ts); 2518 VFS_SMR_ZONE_SET(cache_zone_large); 2519 VFS_SMR_ZONE_SET(cache_zone_large_ts); 2520 2521 ncsize = desiredvnodes * ncsizefactor; 2522 cache_recalc_neg_min(ncnegminpct); 2523 nchashtbl = nchinittbl(desiredvnodes * 2, &nchash); 2524 ncbuckethash = cache_roundup_2(mp_ncpus * mp_ncpus) - 1; 2525 if (ncbuckethash < 7) /* arbitrarily chosen to avoid having one lock */ 2526 ncbuckethash = 7; 2527 if (ncbuckethash > nchash) 2528 ncbuckethash = nchash; 2529 bucketlocks = malloc(sizeof(*bucketlocks) * numbucketlocks, M_VFSCACHE, 2530 M_WAITOK | M_ZERO); 2531 for (i = 0; i < numbucketlocks; i++) 2532 mtx_init(&bucketlocks[i], "ncbuc", NULL, MTX_DUPOK | MTX_RECURSE); 2533 ncvnodehash = ncbuckethash; 2534 vnodelocks = malloc(sizeof(*vnodelocks) * numvnodelocks, M_VFSCACHE, 2535 M_WAITOK | M_ZERO); 2536 for (i = 0; i < numvnodelocks; i++) 2537 mtx_init(&vnodelocks[i], "ncvn", NULL, MTX_DUPOK | MTX_RECURSE); 2538 2539 for (i = 0; i < numneglists; i++) { 2540 mtx_init(&neglists[i].nl_evict_lock, "ncnege", NULL, MTX_DEF); 2541 mtx_init(&neglists[i].nl_lock, "ncnegl", NULL, MTX_DEF); 2542 TAILQ_INIT(&neglists[i].nl_list); 2543 TAILQ_INIT(&neglists[i].nl_hotlist); 2544 } 2545 } 2546 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL); 2547 2548 void 2549 cache_vnode_init(struct vnode *vp) 2550 { 2551 2552 LIST_INIT(&vp->v_cache_src); 2553 TAILQ_INIT(&vp->v_cache_dst); 2554 vp->v_cache_dd = NULL; 2555 cache_prehash(vp); 2556 } 2557 2558 void 2559 cache_changesize(u_long newmaxvnodes) 2560 { 2561 struct nchashhead *new_nchashtbl, *old_nchashtbl; 2562 u_long new_nchash, old_nchash; 2563 struct namecache *ncp; 2564 uint32_t hash; 2565 u_long newncsize; 2566 int i; 2567 2568 newncsize = newmaxvnodes * ncsizefactor; 2569 
newmaxvnodes = cache_roundup_2(newmaxvnodes * 2); 2570 if (newmaxvnodes < numbucketlocks) 2571 newmaxvnodes = numbucketlocks; 2572 2573 new_nchashtbl = nchinittbl(newmaxvnodes, &new_nchash); 2574 /* If same hash table size, nothing to do */ 2575 if (nchash == new_nchash) { 2576 ncfreetbl(new_nchashtbl); 2577 return; 2578 } 2579 /* 2580 * Move everything from the old hash table to the new table. 2581 * None of the namecache entries in the table can be removed 2582 * because to do so, they have to be removed from the hash table. 2583 */ 2584 cache_lock_all_vnodes(); 2585 cache_lock_all_buckets(); 2586 old_nchashtbl = nchashtbl; 2587 old_nchash = nchash; 2588 nchashtbl = new_nchashtbl; 2589 nchash = new_nchash; 2590 for (i = 0; i <= old_nchash; i++) { 2591 while ((ncp = CK_SLIST_FIRST(&old_nchashtbl[i])) != NULL) { 2592 hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, 2593 ncp->nc_dvp); 2594 CK_SLIST_REMOVE(&old_nchashtbl[i], ncp, namecache, nc_hash); 2595 CK_SLIST_INSERT_HEAD(NCHHASH(hash), ncp, nc_hash); 2596 } 2597 } 2598 ncsize = newncsize; 2599 cache_recalc_neg_min(ncnegminpct); 2600 cache_unlock_all_buckets(); 2601 cache_unlock_all_vnodes(); 2602 ncfreetbl(old_nchashtbl); 2603 } 2604 2605 /* 2606 * Invalidate all entries from and to a particular vnode. 2607 */ 2608 static void 2609 cache_purge_impl(struct vnode *vp) 2610 { 2611 struct cache_freebatch batch; 2612 struct namecache *ncp; 2613 struct mtx *vlp, *vlp2; 2614 2615 TAILQ_INIT(&batch); 2616 vlp = VP2VNODELOCK(vp); 2617 vlp2 = NULL; 2618 mtx_lock(vlp); 2619 retry: 2620 while (!LIST_EMPTY(&vp->v_cache_src)) { 2621 ncp = LIST_FIRST(&vp->v_cache_src); 2622 if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2)) 2623 goto retry; 2624 TAILQ_INSERT_TAIL(&batch, ncp, nc_dst); 2625 } 2626 while (!TAILQ_EMPTY(&vp->v_cache_dst)) { 2627 ncp = TAILQ_FIRST(&vp->v_cache_dst); 2628 if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2)) 2629 goto retry; 2630 TAILQ_INSERT_TAIL(&batch, ncp, nc_dst); 2631 } 2632 ncp = vp->v_cache_dd; 2633 if (ncp != NULL) { 2634 KASSERT(ncp->nc_flag & NCF_ISDOTDOT, 2635 ("lost dotdot link")); 2636 if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2)) 2637 goto retry; 2638 TAILQ_INSERT_TAIL(&batch, ncp, nc_dst); 2639 } 2640 KASSERT(vp->v_cache_dd == NULL, ("incomplete purge")); 2641 mtx_unlock(vlp); 2642 if (vlp2 != NULL) 2643 mtx_unlock(vlp2); 2644 cache_free_batch(&batch); 2645 } 2646 2647 /* 2648 * Opportunistic check to see if there is anything to do. 2649 */ 2650 static bool 2651 cache_has_entries(struct vnode *vp) 2652 { 2653 2654 if (LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) && 2655 vp->v_cache_dd == NULL) 2656 return (false); 2657 return (true); 2658 } 2659 2660 void 2661 cache_purge(struct vnode *vp) 2662 { 2663 2664 SDT_PROBE1(vfs, namecache, purge, done, vp); 2665 if (!cache_has_entries(vp)) 2666 return; 2667 cache_purge_impl(vp); 2668 } 2669 2670 /* 2671 * Only to be used by vgone. 2672 */ 2673 void 2674 cache_purge_vgone(struct vnode *vp) 2675 { 2676 struct mtx *vlp; 2677 2678 VNPASS(VN_IS_DOOMED(vp), vp); 2679 if (cache_has_entries(vp)) { 2680 cache_purge_impl(vp); 2681 return; 2682 } 2683 2684 /* 2685 * Serialize against a potential thread doing cache_purge. 2686 */ 2687 vlp = VP2VNODELOCK(vp); 2688 mtx_wait_unlocked(vlp); 2689 if (cache_has_entries(vp)) { 2690 cache_purge_impl(vp); 2691 return; 2692 } 2693 return; 2694 } 2695 2696 /* 2697 * Invalidate all negative entries for a particular directory vnode. 
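 * Negative entries live on the directory's v_cache_src list alongside
 * positive ones; the walk below filters on NCF_NEGATIVE and frees all
 * victims in one batch after the vnode lock is dropped.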
2698 */
2699 void
2700 cache_purge_negative(struct vnode *vp)
2701 {
2702 	struct cache_freebatch batch;
2703 	struct namecache *ncp, *nnp;
2704 	struct mtx *vlp;
2705 
2706 	SDT_PROBE1(vfs, namecache, purge_negative, done, vp);
2707 	if (LIST_EMPTY(&vp->v_cache_src))
2708 		return;
2709 	TAILQ_INIT(&batch);
2710 	vlp = VP2VNODELOCK(vp);
2711 	mtx_lock(vlp);
2712 	LIST_FOREACH_SAFE(ncp, &vp->v_cache_src, nc_src, nnp) {
2713 		if (!(ncp->nc_flag & NCF_NEGATIVE))
2714 			continue;
2715 		cache_zap_negative_locked_vnode_kl(ncp, vp);
2716 		TAILQ_INSERT_TAIL(&batch, ncp, nc_dst);
2717 	}
2718 	mtx_unlock(vlp);
2719 	cache_free_batch(&batch);
2720 }
2721 
2722 /*
2723  * Entry points for modifying VOP operations.
2724  */
2725 void
2726 cache_vop_rename(struct vnode *fdvp, struct vnode *fvp, struct vnode *tdvp,
2727     struct vnode *tvp, struct componentname *fcnp, struct componentname *tcnp)
2728 {
2729 
2730 	ASSERT_VOP_IN_SEQC(fdvp);
2731 	ASSERT_VOP_IN_SEQC(fvp);
2732 	ASSERT_VOP_IN_SEQC(tdvp);
2733 	if (tvp != NULL)
2734 		ASSERT_VOP_IN_SEQC(tvp);
2735 
2736 	cache_purge(fvp);
2737 	if (tvp != NULL) {
2738 		cache_purge(tvp);
2739 		KASSERT(!cache_remove_cnp(tdvp, tcnp),
2740 		    ("%s: lingering negative entry", __func__));
2741 	} else {
2742 		cache_remove_cnp(tdvp, tcnp);
2743 	}
2744 
2745 	/*
2746 	 * TODO
2747 	 *
2748 	 * Historically renaming always purged all relevant entries,
2749 	 * but that is quite wasteful. In particular, it turns out that in
2750 	 * many cases the target file is accessed immediately after the
2751 	 * rename, inducing a cache miss.
2752 	 *
2753 	 * Recode this to reduce relocking and to reuse the existing entry
2754 	 * (if any) instead of just removing it above and allocating a new one here.
2755 	 */
2756 	if (cache_rename_add) {
2757 		cache_enter(tdvp, fvp, tcnp);
2758 	}
2759 }
2760 
2761 void
2762 cache_vop_rmdir(struct vnode *dvp, struct vnode *vp)
2763 {
2764 
2765 	ASSERT_VOP_IN_SEQC(dvp);
2766 	ASSERT_VOP_IN_SEQC(vp);
2767 	cache_purge(vp);
2768 }
2769 
2770 #ifdef INVARIANTS
2771 /*
2772  * Validate that if an entry exists it matches.
2773  */
2774 void
2775 cache_validate(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
2776 {
2777 	struct namecache *ncp;
2778 	struct mtx *blp;
2779 	uint32_t hash;
2780 
2781 	hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
2782 	if (CK_SLIST_EMPTY(NCHHASH(hash)))
2783 		return;
2784 	blp = HASH2BUCKETLOCK(hash);
2785 	mtx_lock(blp);
2786 	CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
2787 		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
2788 		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen)) {
2789 			if (ncp->nc_vp != vp)
2790 				panic("%s: mismatch (%p != %p); ncp %p [%s] dvp %p vp %p\n",
2791 				    __func__, vp, ncp->nc_vp, ncp, ncp->nc_name, ncp->nc_dvp,
2792 				    ncp->nc_vp);
2793 		}
2794 	}
2795 	mtx_unlock(blp);
2796 }
2797 #endif
2798 
2799 /*
2800  * Flush all entries referencing a particular filesystem.
2801  */
2802 void
2803 cache_purgevfs(struct mount *mp)
2804 {
2805 	struct vnode *vp, *mvp;
2806 
2807 	SDT_PROBE1(vfs, namecache, purgevfs, done, mp);
2808 	/*
2809 	 * Somewhat wasteful iteration over all vnodes. Would be better to
2810 	 * support filtering and avoid the interlock to begin with.
2811 	 */
2812 	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
2813 		if (!cache_has_entries(vp)) {
2814 			VI_UNLOCK(vp);
2815 			continue;
2816 		}
2817 		vholdl(vp);
2818 		VI_UNLOCK(vp);
2819 		cache_purge(vp);
2820 		vdrop(vp);
2821 	}
2822 }
2823 
2824 /*
2825  * Perform canonical checks and a cache lookup, and pass on to the
2826  * filesystem through vop_cachedlookup only if needed.
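 *
 * A filesystem opts in by pointing its vop_lookup slot at this function
 * and implementing the real work in vop_cachedlookup. Illustrative
 * sketch (foofs is a made-up example, not a real filesystem):
 *
 *	struct vop_vector foofs_vnodeops = {
 *		.vop_default =		&default_vnodeops,
 *		.vop_lookup =		vfs_cache_lookup,
 *		.vop_cachedlookup =	foofs_lookup,
 *		...
 *	};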
2827 */
2828 
2829 int
2830 vfs_cache_lookup(struct vop_lookup_args *ap)
2831 {
2832 	struct vnode *dvp;
2833 	int error;
2834 	struct vnode **vpp = ap->a_vpp;
2835 	struct componentname *cnp = ap->a_cnp;
2836 	int flags = cnp->cn_flags;
2837 
2838 	*vpp = NULL;
2839 	dvp = ap->a_dvp;
2840 
2841 	if (dvp->v_type != VDIR)
2842 		return (ENOTDIR);
2843 
2844 	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
2845 	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
2846 		return (EROFS);
2847 
2848 	error = vn_dir_check_exec(dvp, cnp);
2849 	if (error != 0)
2850 		return (error);
2851 
2852 	error = cache_lookup(dvp, vpp, cnp, NULL, NULL);
2853 	if (error == 0)
2854 		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
2855 	if (error == -1)
2856 		return (0);
2857 	return (error);
2858 }
2859 
2860 /* Implementation of the getcwd syscall. */
2861 int
2862 sys___getcwd(struct thread *td, struct __getcwd_args *uap)
2863 {
2864 	char *buf, *retbuf;
2865 	size_t buflen;
2866 	int error;
2867 
2868 	buflen = uap->buflen;
2869 	if (__predict_false(buflen < 2))
2870 		return (EINVAL);
2871 	if (buflen > MAXPATHLEN)
2872 		buflen = MAXPATHLEN;
2873 
2874 	buf = uma_zalloc(namei_zone, M_WAITOK);
2875 	error = vn_getcwd(buf, &retbuf, &buflen);
2876 	if (error == 0)
2877 		error = copyout(retbuf, uap->buf, buflen);
2878 	uma_zfree(namei_zone, buf);
2879 	return (error);
2880 }
2881 
2882 int
2883 vn_getcwd(char *buf, char **retbuf, size_t *buflen)
2884 {
2885 	struct pwd *pwd;
2886 	int error;
2887 
2888 	vfs_smr_enter();
2889 	pwd = pwd_get_smr();
2890 	error = vn_fullpath_any_smr(pwd->pwd_cdir, pwd->pwd_rdir, buf, retbuf,
2891 	    buflen, 0);
2892 	VFS_SMR_ASSERT_NOT_ENTERED();
2893 	if (error < 0) {
2894 		pwd = pwd_hold(curthread);
2895 		error = vn_fullpath_any(pwd->pwd_cdir, pwd->pwd_rdir, buf,
2896 		    retbuf, buflen);
2897 		pwd_drop(pwd);
2898 	}
2899 
2900 #ifdef KTRACE
2901 	if (KTRPOINT(curthread, KTR_NAMEI) && error == 0)
2902 		ktrnamei(*retbuf);
2903 #endif
2904 	return (error);
2905 }
2906 
2907 static int
2908 kern___realpathat(struct thread *td, int fd, const char *path, char *buf,
2909     size_t size, int flags, enum uio_seg pathseg)
2910 {
2911 	struct nameidata nd;
2912 	char *retbuf, *freebuf;
2913 	int error;
2914 
2915 	if (flags != 0)
2916 		return (EINVAL);
2917 	NDINIT_ATRIGHTS(&nd, LOOKUP, FOLLOW | SAVENAME | WANTPARENT | AUDITVNODE1,
2918 	    pathseg, path, fd, &cap_fstat_rights, td);
2919 	if ((error = namei(&nd)) != 0)
2920 		return (error);
2921 	error = vn_fullpath_hardlink(&nd, &retbuf, &freebuf, &size);
2922 	if (error == 0) {
2923 		error = copyout(retbuf, buf, size);
2924 		free(freebuf, M_TEMP);
2925 	}
2926 	NDFREE(&nd, 0);
2927 	return (error);
2928 }
2929 
2930 int
2931 sys___realpathat(struct thread *td, struct __realpathat_args *uap)
2932 {
2933 
2934 	return (kern___realpathat(td, uap->fd, uap->path, uap->buf, uap->size,
2935 	    uap->flags, UIO_USERSPACE));
2936 }
2937 
2938 /*
2939  * Retrieve the full filesystem path that corresponds to a vnode from the
2940  * name cache (if available).
2941  */
2942 int
2943 vn_fullpath(struct vnode *vp, char **retbuf, char **freebuf)
2944 {
2945 	struct pwd *pwd;
2946 	char *buf;
2947 	size_t buflen;
2948 	int error;
2949 
2950 	if (__predict_false(vp == NULL))
2951 		return (EINVAL);
2952 
2953 	buflen = MAXPATHLEN;
2954 	buf = malloc(buflen, M_TEMP, M_WAITOK);
2955 	vfs_smr_enter();
2956 	pwd = pwd_get_smr();
2957 	error = vn_fullpath_any_smr(vp, pwd->pwd_rdir, buf, retbuf, &buflen, 0);
2958 	VFS_SMR_ASSERT_NOT_ENTERED();
2959 	if (error < 0) {
2960 		pwd = pwd_hold(curthread);
2961 		error = vn_fullpath_any(vp, pwd->pwd_rdir, buf, retbuf,
&buflen); 2962 pwd_drop(pwd); 2963 } 2964 if (error == 0) 2965 *freebuf = buf; 2966 else 2967 free(buf, M_TEMP); 2968 return (error); 2969 } 2970 2971 /* 2972 * This function is similar to vn_fullpath, but it attempts to lookup the 2973 * pathname relative to the global root mount point. This is required for the 2974 * auditing sub-system, as audited pathnames must be absolute, relative to the 2975 * global root mount point. 2976 */ 2977 int 2978 vn_fullpath_global(struct vnode *vp, char **retbuf, char **freebuf) 2979 { 2980 char *buf; 2981 size_t buflen; 2982 int error; 2983 2984 if (__predict_false(vp == NULL)) 2985 return (EINVAL); 2986 buflen = MAXPATHLEN; 2987 buf = malloc(buflen, M_TEMP, M_WAITOK); 2988 vfs_smr_enter(); 2989 error = vn_fullpath_any_smr(vp, rootvnode, buf, retbuf, &buflen, 0); 2990 VFS_SMR_ASSERT_NOT_ENTERED(); 2991 if (error < 0) { 2992 error = vn_fullpath_any(vp, rootvnode, buf, retbuf, &buflen); 2993 } 2994 if (error == 0) 2995 *freebuf = buf; 2996 else 2997 free(buf, M_TEMP); 2998 return (error); 2999 } 3000 3001 static struct namecache * 3002 vn_dd_from_dst(struct vnode *vp) 3003 { 3004 struct namecache *ncp; 3005 3006 cache_assert_vnode_locked(vp); 3007 TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst) { 3008 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 3009 return (ncp); 3010 } 3011 return (NULL); 3012 } 3013 3014 int 3015 vn_vptocnp(struct vnode **vp, char *buf, size_t *buflen) 3016 { 3017 struct vnode *dvp; 3018 struct namecache *ncp; 3019 struct mtx *vlp; 3020 int error; 3021 3022 vlp = VP2VNODELOCK(*vp); 3023 mtx_lock(vlp); 3024 ncp = (*vp)->v_cache_dd; 3025 if (ncp != NULL && (ncp->nc_flag & NCF_ISDOTDOT) == 0) { 3026 KASSERT(ncp == vn_dd_from_dst(*vp), 3027 ("%s: mismatch for dd entry (%p != %p)", __func__, 3028 ncp, vn_dd_from_dst(*vp))); 3029 } else { 3030 ncp = vn_dd_from_dst(*vp); 3031 } 3032 if (ncp != NULL) { 3033 if (*buflen < ncp->nc_nlen) { 3034 mtx_unlock(vlp); 3035 vrele(*vp); 3036 counter_u64_add(numfullpathfail4, 1); 3037 error = ENOMEM; 3038 SDT_PROBE3(vfs, namecache, fullpath, return, error, 3039 vp, NULL); 3040 return (error); 3041 } 3042 *buflen -= ncp->nc_nlen; 3043 memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen); 3044 SDT_PROBE3(vfs, namecache, fullpath, hit, ncp->nc_dvp, 3045 ncp->nc_name, vp); 3046 dvp = *vp; 3047 *vp = ncp->nc_dvp; 3048 vref(*vp); 3049 mtx_unlock(vlp); 3050 vrele(dvp); 3051 return (0); 3052 } 3053 SDT_PROBE1(vfs, namecache, fullpath, miss, vp); 3054 3055 mtx_unlock(vlp); 3056 vn_lock(*vp, LK_SHARED | LK_RETRY); 3057 error = VOP_VPTOCNP(*vp, &dvp, buf, buflen); 3058 vput(*vp); 3059 if (error) { 3060 counter_u64_add(numfullpathfail2, 1); 3061 SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL); 3062 return (error); 3063 } 3064 3065 *vp = dvp; 3066 if (VN_IS_DOOMED(dvp)) { 3067 /* forced unmount */ 3068 vrele(dvp); 3069 error = ENOENT; 3070 SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL); 3071 return (error); 3072 } 3073 /* 3074 * *vp has its use count incremented still. 3075 */ 3076 3077 return (0); 3078 } 3079 3080 /* 3081 * Resolve a directory to a pathname. 3082 * 3083 * The name of the directory can always be found in the namecache or fetched 3084 * from the filesystem. There is also guaranteed to be only one parent, meaning 3085 * we can just follow vnodes up until we find the root. 3086 * 3087 * The vnode must be referenced. 
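 * The reference is always consumed, whether the call succeeds or fails.
 *
 * The path is assembled backwards from the end of the buffer, e.g.
 * (illustrative layout):
 *
 *	buf: [ <unused> | /usr/share/misc\0 ]
 *	                  ^ *retbuf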
3088 */ 3089 static int 3090 vn_fullpath_dir(struct vnode *vp, struct vnode *rdir, char *buf, char **retbuf, 3091 size_t *len, size_t addend) 3092 { 3093 #ifdef KDTRACE_HOOKS 3094 struct vnode *startvp = vp; 3095 #endif 3096 struct vnode *vp1; 3097 size_t buflen; 3098 int error; 3099 bool slash_prefixed; 3100 3101 VNPASS(vp->v_type == VDIR || VN_IS_DOOMED(vp), vp); 3102 VNPASS(vp->v_usecount > 0, vp); 3103 3104 buflen = *len; 3105 3106 slash_prefixed = true; 3107 if (addend == 0) { 3108 MPASS(*len >= 2); 3109 buflen--; 3110 buf[buflen] = '\0'; 3111 slash_prefixed = false; 3112 } 3113 3114 error = 0; 3115 3116 SDT_PROBE1(vfs, namecache, fullpath, entry, vp); 3117 counter_u64_add(numfullpathcalls, 1); 3118 while (vp != rdir && vp != rootvnode) { 3119 /* 3120 * The vp vnode must be already fully constructed, 3121 * since it is either found in namecache or obtained 3122 * from VOP_VPTOCNP(). We may test for VV_ROOT safely 3123 * without obtaining the vnode lock. 3124 */ 3125 if ((vp->v_vflag & VV_ROOT) != 0) { 3126 vn_lock(vp, LK_RETRY | LK_SHARED); 3127 3128 /* 3129 * With the vnode locked, check for races with 3130 * unmount, forced or not. Note that we 3131 * already verified that vp is not equal to 3132 * the root vnode, which means that 3133 * mnt_vnodecovered can be NULL only for the 3134 * case of unmount. 3135 */ 3136 if (VN_IS_DOOMED(vp) || 3137 (vp1 = vp->v_mount->mnt_vnodecovered) == NULL || 3138 vp1->v_mountedhere != vp->v_mount) { 3139 vput(vp); 3140 error = ENOENT; 3141 SDT_PROBE3(vfs, namecache, fullpath, return, 3142 error, vp, NULL); 3143 break; 3144 } 3145 3146 vref(vp1); 3147 vput(vp); 3148 vp = vp1; 3149 continue; 3150 } 3151 if (vp->v_type != VDIR) { 3152 vrele(vp); 3153 counter_u64_add(numfullpathfail1, 1); 3154 error = ENOTDIR; 3155 SDT_PROBE3(vfs, namecache, fullpath, return, 3156 error, vp, NULL); 3157 break; 3158 } 3159 error = vn_vptocnp(&vp, buf, &buflen); 3160 if (error) 3161 break; 3162 if (buflen == 0) { 3163 vrele(vp); 3164 error = ENOMEM; 3165 SDT_PROBE3(vfs, namecache, fullpath, return, error, 3166 startvp, NULL); 3167 break; 3168 } 3169 buf[--buflen] = '/'; 3170 slash_prefixed = true; 3171 } 3172 if (error) 3173 return (error); 3174 if (!slash_prefixed) { 3175 if (buflen == 0) { 3176 vrele(vp); 3177 counter_u64_add(numfullpathfail4, 1); 3178 SDT_PROBE3(vfs, namecache, fullpath, return, ENOMEM, 3179 startvp, NULL); 3180 return (ENOMEM); 3181 } 3182 buf[--buflen] = '/'; 3183 } 3184 counter_u64_add(numfullpathfound, 1); 3185 vrele(vp); 3186 3187 *retbuf = buf + buflen; 3188 SDT_PROBE3(vfs, namecache, fullpath, return, 0, startvp, *retbuf); 3189 *len -= buflen; 3190 *len += addend; 3191 return (0); 3192 } 3193 3194 /* 3195 * Resolve an arbitrary vnode to a pathname. 
3196 * 3197 * Note 2 caveats: 3198 * - hardlinks are not tracked, thus if the vnode is not a directory this can 3199 * resolve to a different path than the one used to find it 3200 * - namecache is not mandatory, meaning names are not guaranteed to be added 3201 * (in which case resolving fails) 3202 */ 3203 static void __inline 3204 cache_rev_failed_impl(int *reason, int line) 3205 { 3206 3207 *reason = line; 3208 } 3209 #define cache_rev_failed(var) cache_rev_failed_impl((var), __LINE__) 3210 3211 static int 3212 vn_fullpath_any_smr(struct vnode *vp, struct vnode *rdir, char *buf, 3213 char **retbuf, size_t *buflen, size_t addend) 3214 { 3215 #ifdef KDTRACE_HOOKS 3216 struct vnode *startvp = vp; 3217 #endif 3218 struct vnode *tvp; 3219 struct mount *mp; 3220 struct namecache *ncp; 3221 size_t orig_buflen; 3222 int reason; 3223 int error; 3224 #ifdef KDTRACE_HOOKS 3225 int i; 3226 #endif 3227 seqc_t vp_seqc, tvp_seqc; 3228 u_char nc_flag; 3229 3230 VFS_SMR_ASSERT_ENTERED(); 3231 3232 if (!cache_fast_revlookup) { 3233 vfs_smr_exit(); 3234 return (-1); 3235 } 3236 3237 orig_buflen = *buflen; 3238 3239 if (addend == 0) { 3240 MPASS(*buflen >= 2); 3241 *buflen -= 1; 3242 buf[*buflen] = '\0'; 3243 } 3244 3245 if (vp == rdir || vp == rootvnode) { 3246 if (addend == 0) { 3247 *buflen -= 1; 3248 buf[*buflen] = '/'; 3249 } 3250 goto out_ok; 3251 } 3252 3253 #ifdef KDTRACE_HOOKS 3254 i = 0; 3255 #endif 3256 error = -1; 3257 ncp = NULL; /* for sdt probe down below */ 3258 vp_seqc = vn_seqc_read_any(vp); 3259 if (seqc_in_modify(vp_seqc)) { 3260 cache_rev_failed(&reason); 3261 goto out_abort; 3262 } 3263 3264 for (;;) { 3265 #ifdef KDTRACE_HOOKS 3266 i++; 3267 #endif 3268 if ((vp->v_vflag & VV_ROOT) != 0) { 3269 mp = atomic_load_ptr(&vp->v_mount); 3270 if (mp == NULL) { 3271 cache_rev_failed(&reason); 3272 goto out_abort; 3273 } 3274 tvp = atomic_load_ptr(&mp->mnt_vnodecovered); 3275 tvp_seqc = vn_seqc_read_any(tvp); 3276 if (seqc_in_modify(tvp_seqc)) { 3277 cache_rev_failed(&reason); 3278 goto out_abort; 3279 } 3280 if (!vn_seqc_consistent(vp, vp_seqc)) { 3281 cache_rev_failed(&reason); 3282 goto out_abort; 3283 } 3284 vp = tvp; 3285 vp_seqc = tvp_seqc; 3286 continue; 3287 } 3288 ncp = atomic_load_ptr(&vp->v_cache_dd); 3289 if (ncp == NULL) { 3290 cache_rev_failed(&reason); 3291 goto out_abort; 3292 } 3293 nc_flag = atomic_load_char(&ncp->nc_flag); 3294 if ((nc_flag & NCF_ISDOTDOT) != 0) { 3295 cache_rev_failed(&reason); 3296 goto out_abort; 3297 } 3298 if (!cache_ncp_canuse(ncp)) { 3299 cache_rev_failed(&reason); 3300 goto out_abort; 3301 } 3302 if (ncp->nc_nlen >= *buflen) { 3303 cache_rev_failed(&reason); 3304 error = ENOMEM; 3305 goto out_abort; 3306 } 3307 *buflen -= ncp->nc_nlen; 3308 memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen); 3309 *buflen -= 1; 3310 buf[*buflen] = '/'; 3311 tvp = ncp->nc_dvp; 3312 tvp_seqc = vn_seqc_read_any(tvp); 3313 if (seqc_in_modify(tvp_seqc)) { 3314 cache_rev_failed(&reason); 3315 goto out_abort; 3316 } 3317 if (!vn_seqc_consistent(vp, vp_seqc)) { 3318 cache_rev_failed(&reason); 3319 goto out_abort; 3320 } 3321 vp = tvp; 3322 vp_seqc = tvp_seqc; 3323 if (vp == rdir || vp == rootvnode) 3324 break; 3325 } 3326 out_ok: 3327 vfs_smr_exit(); 3328 *retbuf = buf + *buflen; 3329 *buflen = orig_buflen - *buflen + addend; 3330 SDT_PROBE2(vfs, namecache, fullpath_smr, hit, startvp, *retbuf); 3331 return (0); 3332 3333 out_abort: 3334 *buflen = orig_buflen; 3335 SDT_PROBE4(vfs, namecache, fullpath_smr, miss, startvp, ncp, reason, i); 3336 vfs_smr_exit(); 3337 return (error); 
3338 } 3339 3340 static int 3341 vn_fullpath_any(struct vnode *vp, struct vnode *rdir, char *buf, char **retbuf, 3342 size_t *buflen) 3343 { 3344 size_t orig_buflen, addend; 3345 int error; 3346 3347 if (*buflen < 2) 3348 return (EINVAL); 3349 3350 orig_buflen = *buflen; 3351 3352 vref(vp); 3353 addend = 0; 3354 if (vp->v_type != VDIR) { 3355 *buflen -= 1; 3356 buf[*buflen] = '\0'; 3357 error = vn_vptocnp(&vp, buf, buflen); 3358 if (error) 3359 return (error); 3360 if (*buflen == 0) { 3361 vrele(vp); 3362 return (ENOMEM); 3363 } 3364 *buflen -= 1; 3365 buf[*buflen] = '/'; 3366 addend = orig_buflen - *buflen; 3367 } 3368 3369 return (vn_fullpath_dir(vp, rdir, buf, retbuf, buflen, addend)); 3370 } 3371 3372 /* 3373 * Resolve an arbitrary vnode to a pathname (taking care of hardlinks). 3374 * 3375 * Since the namecache does not track hardlinks, the caller is expected to first 3376 * look up the target vnode with SAVENAME | WANTPARENT flags passed to namei. 3377 * 3378 * Then we have 2 cases: 3379 * - if the found vnode is a directory, the path can be constructed just by 3380 * following names up the chain 3381 * - otherwise we populate the buffer with the saved name and start resolving 3382 * from the parent 3383 */ 3384 static int 3385 vn_fullpath_hardlink(struct nameidata *ndp, char **retbuf, char **freebuf, 3386 size_t *buflen) 3387 { 3388 char *buf, *tmpbuf; 3389 struct pwd *pwd; 3390 struct componentname *cnp; 3391 struct vnode *vp; 3392 size_t addend; 3393 int error; 3394 enum vtype type; 3395 3396 if (*buflen < 2) 3397 return (EINVAL); 3398 if (*buflen > MAXPATHLEN) 3399 *buflen = MAXPATHLEN; 3400 3401 buf = malloc(*buflen, M_TEMP, M_WAITOK); 3402 3403 addend = 0; 3404 vp = ndp->ni_vp; 3405 /* 3406 * Check for VBAD to work around the vp_crossmp bug in lookup(). 3407 * 3408 * For example consider tmpfs on /tmp and realpath /tmp. ni_vp will be 3409 * set to mount point's root vnode while ni_dvp will be vp_crossmp. 3410 * If the type is VDIR (like in this very case) we can skip looking 3411 * at ni_dvp in the first place. However, since vnodes get passed here 3412 * unlocked the target may transition to doomed state (type == VBAD) 3413 * before we get to evaluate the condition. If this happens, we will 3414 * populate part of the buffer and descend to vn_fullpath_dir with 3415 * vp == vp_crossmp. Prevent the problem by checking for VBAD. 3416 * 3417 * This should be atomic_load(&vp->v_type) but it is illegal to take 3418 * an address of a bit field, even if said field is sized to char. 3419 * Work around the problem by reading the value into a full-sized enum 3420 * and then re-reading it with atomic_load which will still prevent 3421 * the compiler from re-reading down the road. 
3422 */ 3423 type = vp->v_type; 3424 type = atomic_load_int(&type); 3425 if (type == VBAD) { 3426 error = ENOENT; 3427 goto out_bad; 3428 } 3429 if (type != VDIR) { 3430 cnp = &ndp->ni_cnd; 3431 addend = cnp->cn_namelen + 2; 3432 if (*buflen < addend) { 3433 error = ENOMEM; 3434 goto out_bad; 3435 } 3436 *buflen -= addend; 3437 tmpbuf = buf + *buflen; 3438 tmpbuf[0] = '/'; 3439 memcpy(&tmpbuf[1], cnp->cn_nameptr, cnp->cn_namelen); 3440 tmpbuf[addend - 1] = '\0'; 3441 vp = ndp->ni_dvp; 3442 } 3443 3444 vfs_smr_enter(); 3445 pwd = pwd_get_smr(); 3446 error = vn_fullpath_any_smr(vp, pwd->pwd_rdir, buf, retbuf, buflen, 3447 addend); 3448 VFS_SMR_ASSERT_NOT_ENTERED(); 3449 if (error < 0) { 3450 pwd = pwd_hold(curthread); 3451 vref(vp); 3452 error = vn_fullpath_dir(vp, pwd->pwd_rdir, buf, retbuf, buflen, 3453 addend); 3454 pwd_drop(pwd); 3455 if (error != 0) 3456 goto out_bad; 3457 } 3458 3459 *freebuf = buf; 3460 3461 return (0); 3462 out_bad: 3463 free(buf, M_TEMP); 3464 return (error); 3465 } 3466 3467 struct vnode * 3468 vn_dir_dd_ino(struct vnode *vp) 3469 { 3470 struct namecache *ncp; 3471 struct vnode *ddvp; 3472 struct mtx *vlp; 3473 enum vgetstate vs; 3474 3475 ASSERT_VOP_LOCKED(vp, "vn_dir_dd_ino"); 3476 vlp = VP2VNODELOCK(vp); 3477 mtx_lock(vlp); 3478 TAILQ_FOREACH(ncp, &(vp->v_cache_dst), nc_dst) { 3479 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) 3480 continue; 3481 ddvp = ncp->nc_dvp; 3482 vs = vget_prep(ddvp); 3483 mtx_unlock(vlp); 3484 if (vget_finish(ddvp, LK_SHARED | LK_NOWAIT, vs)) 3485 return (NULL); 3486 return (ddvp); 3487 } 3488 mtx_unlock(vlp); 3489 return (NULL); 3490 } 3491 3492 int 3493 vn_commname(struct vnode *vp, char *buf, u_int buflen) 3494 { 3495 struct namecache *ncp; 3496 struct mtx *vlp; 3497 int l; 3498 3499 vlp = VP2VNODELOCK(vp); 3500 mtx_lock(vlp); 3501 TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst) 3502 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 3503 break; 3504 if (ncp == NULL) { 3505 mtx_unlock(vlp); 3506 return (ENOENT); 3507 } 3508 l = min(ncp->nc_nlen, buflen - 1); 3509 memcpy(buf, ncp->nc_name, l); 3510 mtx_unlock(vlp); 3511 buf[l] = '\0'; 3512 return (0); 3513 } 3514 3515 /* 3516 * This function updates path string to vnode's full global path 3517 * and checks the size of the new path string against the pathlen argument. 3518 * 3519 * Requires a locked, referenced vnode. 3520 * Vnode is re-locked on success or ENODEV, otherwise unlocked. 3521 * 3522 * If vp is a directory, the call to vn_fullpath_global() always succeeds 3523 * because it falls back to the ".." lookup if the namecache lookup fails. 3524 */ 3525 int 3526 vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path, 3527 u_int pathlen) 3528 { 3529 struct nameidata nd; 3530 struct vnode *vp1; 3531 char *rpath, *fbuf; 3532 int error; 3533 3534 ASSERT_VOP_ELOCKED(vp, __func__); 3535 3536 /* Construct global filesystem path from vp. */ 3537 VOP_UNLOCK(vp); 3538 error = vn_fullpath_global(vp, &rpath, &fbuf); 3539 3540 if (error != 0) { 3541 vrele(vp); 3542 return (error); 3543 } 3544 3545 if (strlen(rpath) >= pathlen) { 3546 vrele(vp); 3547 error = ENAMETOOLONG; 3548 goto out; 3549 } 3550 3551 /* 3552 * Re-lookup the vnode by path to detect a possible rename. 3553 * As a side effect, the vnode is relocked. 3554 * If vnode was renamed, return ENOENT. 
3555 */
3556 	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
3557 	    UIO_SYSSPACE, path, td);
3558 	error = namei(&nd);
3559 	if (error != 0) {
3560 		vrele(vp);
3561 		goto out;
3562 	}
3563 	NDFREE(&nd, NDF_ONLY_PNBUF);
3564 	vp1 = nd.ni_vp;
3565 	vrele(vp);
3566 	if (vp1 == vp)
3567 		strcpy(path, rpath);
3568 	else {
3569 		vput(vp1);
3570 		error = ENOENT;
3571 	}
3572 
3573 out:
3574 	free(fbuf, M_TEMP);
3575 	return (error);
3576 }
3577 
3578 #ifdef DDB
3579 static void
3580 db_print_vpath(struct vnode *vp)
3581 {
3582 
3583 	while (vp != NULL) {
3584 		db_printf("%p: ", vp);
3585 		if (vp == rootvnode) {
3586 			db_printf("/");
3587 			vp = NULL;
3588 		} else {
3589 			if (vp->v_vflag & VV_ROOT) {
3590 				db_printf("<mount point>");
3591 				vp = vp->v_mount->mnt_vnodecovered;
3592 			} else {
3593 				struct namecache *ncp;
3594 				char *ncn;
3595 				int i;
3596 
3597 				ncp = TAILQ_FIRST(&vp->v_cache_dst);
3598 				if (ncp != NULL) {
3599 					ncn = ncp->nc_name;
3600 					for (i = 0; i < ncp->nc_nlen; i++)
3601 						db_printf("%c", *ncn++);
3602 					vp = ncp->nc_dvp;
3603 				} else {
3604 					vp = NULL;
3605 				}
3606 			}
3607 		}
3608 		db_printf("\n");
3609 	}
3610 
3611 	return;
3612 }
3613 
3614 DB_SHOW_COMMAND(vpath, db_show_vpath)
3615 {
3616 	struct vnode *vp;
3617 
3618 	if (!have_addr) {
3619 		db_printf("usage: show vpath <struct vnode *>\n");
3620 		return;
3621 	}
3622 
3623 	vp = (struct vnode *)addr;
3624 	db_print_vpath(vp);
3625 }
3626 
3627 #endif
3628 
3629 static int cache_fast_lookup = 1;
3630 static char __read_frequently cache_fast_lookup_enabled = true;
3631 
3632 #define CACHE_FPL_FAILED	-2020
3633 
3634 void
3635 cache_fast_lookup_enabled_recalc(void)
3636 {
3637 	int lookup_flag;
3638 	int mac_on;
3639 
3640 #ifdef MAC
3641 	mac_on = mac_vnode_check_lookup_enabled();
3642 	mac_on |= mac_vnode_check_readlink_enabled();
3643 #else
3644 	mac_on = 0;
3645 #endif
3646 
3647 	lookup_flag = atomic_load_int(&cache_fast_lookup);
3648 	if (lookup_flag && !mac_on) {
3649 		atomic_store_char(&cache_fast_lookup_enabled, true);
3650 	} else {
3651 		atomic_store_char(&cache_fast_lookup_enabled, false);
3652 	}
3653 }
3654 
3655 static int
3656 sysctl_vfs_cache_fast_lookup(SYSCTL_HANDLER_ARGS)
3657 {
3658 	int error, old;
3659 
3660 	old = atomic_load_int(&cache_fast_lookup);
3661 	error = sysctl_handle_int(oidp, arg1, arg2, req);
3662 	if (error == 0 && req->newptr && old != atomic_load_int(&cache_fast_lookup))
3663 		cache_fast_lookup_enabled_recalc();
3664 	return (error);
3665 }
3666 SYSCTL_PROC(_vfs, OID_AUTO, cache_fast_lookup, CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_MPSAFE,
3667     &cache_fast_lookup, 0, sysctl_vfs_cache_fast_lookup, "IU", "");
3668 
3669 /*
3670  * Components of nameidata (or objects it can point to) which may
3671  * need restoring in case fast path lookup fails.
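 * The outer copy is restored wholesale when aborting to the slow path,
 * while nameidata_saved only backs INVARIANTS-time assertions; see
 * cache_fpl_restore_abort() and cache_fpl_checkpoint() below.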
3672 */ 3673 struct nameidata_outer { 3674 size_t ni_pathlen; 3675 int cn_flags; 3676 }; 3677 3678 struct nameidata_saved { 3679 #ifdef INVARIANTS 3680 char *cn_nameptr; 3681 size_t ni_pathlen; 3682 #endif 3683 }; 3684 3685 #ifdef INVARIANTS 3686 struct cache_fpl_debug { 3687 size_t ni_pathlen; 3688 }; 3689 #endif 3690 3691 struct cache_fpl { 3692 struct nameidata *ndp; 3693 struct componentname *cnp; 3694 char *nulchar; 3695 struct pwd **pwd; 3696 struct vnode *dvp; 3697 struct vnode *tvp; 3698 seqc_t dvp_seqc; 3699 seqc_t tvp_seqc; 3700 struct nameidata_saved snd; 3701 struct nameidata_outer snd_outer; 3702 int line; 3703 enum cache_fpl_status status:8; 3704 bool in_smr; 3705 bool fsearch; 3706 bool savename; 3707 #ifdef INVARIANTS 3708 struct cache_fpl_debug debug; 3709 #endif 3710 }; 3711 3712 static bool cache_fplookup_is_mp(struct cache_fpl *fpl); 3713 static int cache_fplookup_cross_mount(struct cache_fpl *fpl); 3714 static int cache_fplookup_partial_setup(struct cache_fpl *fpl); 3715 static int cache_fplookup_skip_slashes(struct cache_fpl *fpl); 3716 static int cache_fplookup_preparse(struct cache_fpl *fpl); 3717 static void cache_fpl_pathlen_dec(struct cache_fpl *fpl); 3718 static void cache_fpl_pathlen_inc(struct cache_fpl *fpl); 3719 static void cache_fpl_pathlen_add(struct cache_fpl *fpl, size_t n); 3720 static void cache_fpl_pathlen_sub(struct cache_fpl *fpl, size_t n); 3721 3722 static void 3723 cache_fpl_cleanup_cnp(struct componentname *cnp) 3724 { 3725 3726 uma_zfree(namei_zone, cnp->cn_pnbuf); 3727 #ifdef DIAGNOSTIC 3728 cnp->cn_pnbuf = NULL; 3729 cnp->cn_nameptr = NULL; 3730 #endif 3731 } 3732 3733 static struct vnode * 3734 cache_fpl_handle_root(struct cache_fpl *fpl) 3735 { 3736 struct nameidata *ndp; 3737 struct componentname *cnp; 3738 3739 ndp = fpl->ndp; 3740 cnp = fpl->cnp; 3741 3742 MPASS(*(cnp->cn_nameptr) == '/'); 3743 cnp->cn_nameptr++; 3744 cache_fpl_pathlen_dec(fpl); 3745 3746 if (__predict_false(*(cnp->cn_nameptr) == '/')) { 3747 do { 3748 cnp->cn_nameptr++; 3749 cache_fpl_pathlen_dec(fpl); 3750 } while (*(cnp->cn_nameptr) == '/'); 3751 } 3752 3753 return (ndp->ni_rootdir); 3754 } 3755 3756 static void 3757 cache_fpl_checkpoint_outer(struct cache_fpl *fpl) 3758 { 3759 3760 fpl->snd_outer.ni_pathlen = fpl->ndp->ni_pathlen; 3761 fpl->snd_outer.cn_flags = fpl->ndp->ni_cnd.cn_flags; 3762 } 3763 3764 static void 3765 cache_fpl_checkpoint(struct cache_fpl *fpl) 3766 { 3767 3768 #ifdef INVARIANTS 3769 fpl->snd.cn_nameptr = fpl->ndp->ni_cnd.cn_nameptr; 3770 fpl->snd.ni_pathlen = fpl->debug.ni_pathlen; 3771 #endif 3772 } 3773 3774 static void 3775 cache_fpl_restore_partial(struct cache_fpl *fpl) 3776 { 3777 3778 fpl->ndp->ni_cnd.cn_flags = fpl->snd_outer.cn_flags; 3779 #ifdef INVARIANTS 3780 fpl->debug.ni_pathlen = fpl->snd.ni_pathlen; 3781 #endif 3782 } 3783 3784 static void 3785 cache_fpl_restore_abort(struct cache_fpl *fpl) 3786 { 3787 3788 cache_fpl_restore_partial(fpl); 3789 /* 3790 * It is 0 on entry by API contract. 
3791 */ 3792 fpl->ndp->ni_resflags = 0; 3793 fpl->ndp->ni_cnd.cn_nameptr = fpl->ndp->ni_cnd.cn_pnbuf; 3794 fpl->ndp->ni_pathlen = fpl->snd_outer.ni_pathlen; 3795 } 3796 3797 #ifdef INVARIANTS 3798 #define cache_fpl_smr_assert_entered(fpl) ({ \ 3799 struct cache_fpl *_fpl = (fpl); \ 3800 MPASS(_fpl->in_smr == true); \ 3801 VFS_SMR_ASSERT_ENTERED(); \ 3802 }) 3803 #define cache_fpl_smr_assert_not_entered(fpl) ({ \ 3804 struct cache_fpl *_fpl = (fpl); \ 3805 MPASS(_fpl->in_smr == false); \ 3806 VFS_SMR_ASSERT_NOT_ENTERED(); \ 3807 }) 3808 static void 3809 cache_fpl_assert_status(struct cache_fpl *fpl) 3810 { 3811 3812 switch (fpl->status) { 3813 case CACHE_FPL_STATUS_UNSET: 3814 __assert_unreachable(); 3815 break; 3816 case CACHE_FPL_STATUS_DESTROYED: 3817 case CACHE_FPL_STATUS_ABORTED: 3818 case CACHE_FPL_STATUS_PARTIAL: 3819 case CACHE_FPL_STATUS_HANDLED: 3820 break; 3821 } 3822 } 3823 #else 3824 #define cache_fpl_smr_assert_entered(fpl) do { } while (0) 3825 #define cache_fpl_smr_assert_not_entered(fpl) do { } while (0) 3826 #define cache_fpl_assert_status(fpl) do { } while (0) 3827 #endif 3828 3829 #define cache_fpl_smr_enter_initial(fpl) ({ \ 3830 struct cache_fpl *_fpl = (fpl); \ 3831 vfs_smr_enter(); \ 3832 _fpl->in_smr = true; \ 3833 }) 3834 3835 #define cache_fpl_smr_enter(fpl) ({ \ 3836 struct cache_fpl *_fpl = (fpl); \ 3837 MPASS(_fpl->in_smr == false); \ 3838 vfs_smr_enter(); \ 3839 _fpl->in_smr = true; \ 3840 }) 3841 3842 #define cache_fpl_smr_exit(fpl) ({ \ 3843 struct cache_fpl *_fpl = (fpl); \ 3844 MPASS(_fpl->in_smr == true); \ 3845 vfs_smr_exit(); \ 3846 _fpl->in_smr = false; \ 3847 }) 3848 3849 static int 3850 cache_fpl_aborted_early_impl(struct cache_fpl *fpl, int line) 3851 { 3852 3853 if (fpl->status != CACHE_FPL_STATUS_UNSET) { 3854 KASSERT(fpl->status == CACHE_FPL_STATUS_PARTIAL, 3855 ("%s: converting to abort from %d at %d, set at %d\n", 3856 __func__, fpl->status, line, fpl->line)); 3857 } 3858 cache_fpl_smr_assert_not_entered(fpl); 3859 fpl->status = CACHE_FPL_STATUS_ABORTED; 3860 fpl->line = line; 3861 return (CACHE_FPL_FAILED); 3862 } 3863 3864 #define cache_fpl_aborted_early(x) cache_fpl_aborted_early_impl((x), __LINE__) 3865 3866 static int __noinline 3867 cache_fpl_aborted_impl(struct cache_fpl *fpl, int line) 3868 { 3869 struct nameidata *ndp; 3870 struct componentname *cnp; 3871 3872 ndp = fpl->ndp; 3873 cnp = fpl->cnp; 3874 3875 if (fpl->status != CACHE_FPL_STATUS_UNSET) { 3876 KASSERT(fpl->status == CACHE_FPL_STATUS_PARTIAL, 3877 ("%s: converting to abort from %d at %d, set at %d\n", 3878 __func__, fpl->status, line, fpl->line)); 3879 } 3880 fpl->status = CACHE_FPL_STATUS_ABORTED; 3881 fpl->line = line; 3882 if (fpl->in_smr) 3883 cache_fpl_smr_exit(fpl); 3884 cache_fpl_restore_abort(fpl); 3885 /* 3886 * Resolving symlinks overwrites data passed by the caller. 3887 * Let namei know. 
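 * In particular cn_pnbuf was overwritten with the symlink target, so a
 * plain restart is impossible; the DESTROYED status set below is expected
 * to make namei re-copy the path before taking the slow path.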
3888 */ 3889 if (ndp->ni_loopcnt > 0) { 3890 fpl->status = CACHE_FPL_STATUS_DESTROYED; 3891 cache_fpl_cleanup_cnp(cnp); 3892 } 3893 return (CACHE_FPL_FAILED); 3894 } 3895 3896 #define cache_fpl_aborted(x) cache_fpl_aborted_impl((x), __LINE__) 3897 3898 static int __noinline 3899 cache_fpl_partial_impl(struct cache_fpl *fpl, int line) 3900 { 3901 3902 KASSERT(fpl->status == CACHE_FPL_STATUS_UNSET, 3903 ("%s: setting to partial at %d, but already set to %d at %d\n", 3904 __func__, line, fpl->status, fpl->line)); 3905 cache_fpl_smr_assert_entered(fpl); 3906 fpl->status = CACHE_FPL_STATUS_PARTIAL; 3907 fpl->line = line; 3908 return (cache_fplookup_partial_setup(fpl)); 3909 } 3910 3911 #define cache_fpl_partial(x) cache_fpl_partial_impl((x), __LINE__) 3912 3913 static int 3914 cache_fpl_handled_impl(struct cache_fpl *fpl, int line) 3915 { 3916 3917 KASSERT(fpl->status == CACHE_FPL_STATUS_UNSET, 3918 ("%s: setting to handled at %d, but already set to %d at %d\n", 3919 __func__, line, fpl->status, fpl->line)); 3920 cache_fpl_smr_assert_not_entered(fpl); 3921 fpl->status = CACHE_FPL_STATUS_HANDLED; 3922 fpl->line = line; 3923 return (0); 3924 } 3925 3926 #define cache_fpl_handled(x) cache_fpl_handled_impl((x), __LINE__) 3927 3928 static int 3929 cache_fpl_handled_error_impl(struct cache_fpl *fpl, int error, int line) 3930 { 3931 3932 KASSERT(fpl->status == CACHE_FPL_STATUS_UNSET, 3933 ("%s: setting to handled at %d, but already set to %d at %d\n", 3934 __func__, line, fpl->status, fpl->line)); 3935 MPASS(error != 0); 3936 MPASS(error != CACHE_FPL_FAILED); 3937 cache_fpl_smr_assert_not_entered(fpl); 3938 fpl->status = CACHE_FPL_STATUS_HANDLED; 3939 fpl->line = line; 3940 fpl->dvp = NULL; 3941 fpl->tvp = NULL; 3942 fpl->savename = false; 3943 return (error); 3944 } 3945 3946 #define cache_fpl_handled_error(x, e) cache_fpl_handled_error_impl((x), (e), __LINE__) 3947 3948 static bool 3949 cache_fpl_terminated(struct cache_fpl *fpl) 3950 { 3951 3952 return (fpl->status != CACHE_FPL_STATUS_UNSET); 3953 } 3954 3955 #define CACHE_FPL_SUPPORTED_CN_FLAGS \ 3956 (NC_NOMAKEENTRY | NC_KEEPPOSENTRY | LOCKLEAF | LOCKPARENT | WANTPARENT | \ 3957 FAILIFEXISTS | FOLLOW | LOCKSHARED | SAVENAME | SAVESTART | WILLBEDIR | \ 3958 ISOPEN | NOMACCHECK | AUDITVNODE1 | AUDITVNODE2 | NOCAPCHECK) 3959 3960 #define CACHE_FPL_INTERNAL_CN_FLAGS \ 3961 (ISDOTDOT | MAKEENTRY | ISLASTCN) 3962 3963 _Static_assert((CACHE_FPL_SUPPORTED_CN_FLAGS & CACHE_FPL_INTERNAL_CN_FLAGS) == 0, 3964 "supported and internal flags overlap"); 3965 3966 static bool 3967 cache_fpl_islastcn(struct nameidata *ndp) 3968 { 3969 3970 return (*ndp->ni_next == 0); 3971 } 3972 3973 static bool 3974 cache_fpl_isdotdot(struct componentname *cnp) 3975 { 3976 3977 if (cnp->cn_namelen == 2 && 3978 cnp->cn_nameptr[1] == '.' 
&& cnp->cn_nameptr[0] == '.') 3979 return (true); 3980 return (false); 3981 } 3982 3983 static bool 3984 cache_can_fplookup(struct cache_fpl *fpl) 3985 { 3986 struct nameidata *ndp; 3987 struct componentname *cnp; 3988 struct thread *td; 3989 3990 ndp = fpl->ndp; 3991 cnp = fpl->cnp; 3992 td = cnp->cn_thread; 3993 3994 if (!atomic_load_char(&cache_fast_lookup_enabled)) { 3995 cache_fpl_aborted_early(fpl); 3996 return (false); 3997 } 3998 if ((cnp->cn_flags & ~CACHE_FPL_SUPPORTED_CN_FLAGS) != 0) { 3999 cache_fpl_aborted_early(fpl); 4000 return (false); 4001 } 4002 if (IN_CAPABILITY_MODE(td)) { 4003 cache_fpl_aborted_early(fpl); 4004 return (false); 4005 } 4006 if (AUDITING_TD(td)) { 4007 cache_fpl_aborted_early(fpl); 4008 return (false); 4009 } 4010 if (ndp->ni_startdir != NULL) { 4011 cache_fpl_aborted_early(fpl); 4012 return (false); 4013 } 4014 return (true); 4015 } 4016 4017 static int 4018 cache_fplookup_dirfd(struct cache_fpl *fpl, struct vnode **vpp) 4019 { 4020 struct nameidata *ndp; 4021 int error; 4022 bool fsearch; 4023 4024 ndp = fpl->ndp; 4025 error = fgetvp_lookup_smr(ndp->ni_dirfd, ndp, vpp, &fsearch); 4026 if (__predict_false(error != 0)) { 4027 return (cache_fpl_aborted(fpl)); 4028 } 4029 fpl->fsearch = fsearch; 4030 return (0); 4031 } 4032 4033 static int __noinline 4034 cache_fplookup_negative_promote(struct cache_fpl *fpl, struct namecache *oncp, 4035 uint32_t hash) 4036 { 4037 struct componentname *cnp; 4038 struct vnode *dvp; 4039 4040 cnp = fpl->cnp; 4041 dvp = fpl->dvp; 4042 4043 cache_fpl_smr_exit(fpl); 4044 if (cache_neg_promote_cond(dvp, cnp, oncp, hash)) 4045 return (cache_fpl_handled_error(fpl, ENOENT)); 4046 else 4047 return (cache_fpl_aborted(fpl)); 4048 } 4049 4050 /* 4051 * The target vnode is not supported, prepare for the slow path to take over. 4052 */ 4053 static int __noinline 4054 cache_fplookup_partial_setup(struct cache_fpl *fpl) 4055 { 4056 struct nameidata *ndp; 4057 struct componentname *cnp; 4058 enum vgetstate dvs; 4059 struct vnode *dvp; 4060 struct pwd *pwd; 4061 seqc_t dvp_seqc; 4062 4063 ndp = fpl->ndp; 4064 cnp = fpl->cnp; 4065 pwd = *(fpl->pwd); 4066 dvp = fpl->dvp; 4067 dvp_seqc = fpl->dvp_seqc; 4068 4069 if (!pwd_hold_smr(pwd)) { 4070 return (cache_fpl_aborted(fpl)); 4071 } 4072 4073 /* 4074 * Note that seqc is checked before the vnode is locked, so by 4075 * the time regular lookup gets to it it may have moved. 4076 * 4077 * Ultimately this does not affect correctness, any lookup errors 4078 * are userspace racing with itself. It is guaranteed that any 4079 * path which ultimately gets found could also have been found 4080 * by regular lookup going all the way in absence of concurrent 4081 * modifications. 

static int __noinline
cache_fplookup_negative_promote(struct cache_fpl *fpl, struct namecache *oncp,
    uint32_t hash)
{
        struct componentname *cnp;
        struct vnode *dvp;

        cnp = fpl->cnp;
        dvp = fpl->dvp;

        cache_fpl_smr_exit(fpl);
        if (cache_neg_promote_cond(dvp, cnp, oncp, hash))
                return (cache_fpl_handled_error(fpl, ENOENT));
        else
                return (cache_fpl_aborted(fpl));
}

/*
 * The target vnode is not supported; prepare for the slow path to take over.
 */
static int __noinline
cache_fplookup_partial_setup(struct cache_fpl *fpl)
{
        struct nameidata *ndp;
        struct componentname *cnp;
        enum vgetstate dvs;
        struct vnode *dvp;
        struct pwd *pwd;
        seqc_t dvp_seqc;

        ndp = fpl->ndp;
        cnp = fpl->cnp;
        pwd = *(fpl->pwd);
        dvp = fpl->dvp;
        dvp_seqc = fpl->dvp_seqc;

        if (!pwd_hold_smr(pwd)) {
                return (cache_fpl_aborted(fpl));
        }

        /*
         * Note that seqc is checked before the vnode is locked, so by
         * the time regular lookup gets to it, it may have moved.
         *
         * Ultimately this does not affect correctness: any lookup errors
         * are userspace racing with itself. It is guaranteed that any
         * path which ultimately gets found could also have been found
         * by regular lookup going all the way in absence of concurrent
         * modifications.
         */
        dvs = vget_prep_smr(dvp);
        cache_fpl_smr_exit(fpl);
        if (__predict_false(dvs == VGET_NONE)) {
                pwd_drop(pwd);
                return (cache_fpl_aborted(fpl));
        }

        vget_finish_ref(dvp, dvs);
        if (!vn_seqc_consistent(dvp, dvp_seqc)) {
                vrele(dvp);
                pwd_drop(pwd);
                return (cache_fpl_aborted(fpl));
        }

        cache_fpl_restore_partial(fpl);
#ifdef INVARIANTS
        if (cnp->cn_nameptr != fpl->snd.cn_nameptr) {
                panic("%s: cn_nameptr mismatch (%p != %p) full [%s]\n", __func__,
                    cnp->cn_nameptr, fpl->snd.cn_nameptr, cnp->cn_pnbuf);
        }
#endif

        ndp->ni_startdir = dvp;
        cnp->cn_flags |= MAKEENTRY;
        if (cache_fpl_islastcn(ndp))
                cnp->cn_flags |= ISLASTCN;
        if (cache_fpl_isdotdot(cnp))
                cnp->cn_flags |= ISDOTDOT;

        /*
         * Skip potential extra slashes parsing did not take care of.
         * cache_fplookup_skip_slashes explains the mechanism.
         */
        if (__predict_false(*(cnp->cn_nameptr) == '/')) {
                do {
                        cnp->cn_nameptr++;
                        cache_fpl_pathlen_dec(fpl);
                } while (*(cnp->cn_nameptr) == '/');
        }

        ndp->ni_pathlen = fpl->nulchar - cnp->cn_nameptr + 1;
#ifdef INVARIANTS
        if (ndp->ni_pathlen != fpl->debug.ni_pathlen) {
                panic("%s: mismatch (%zu != %zu) nulchar %p nameptr %p [%s] ; full string [%s]\n",
                    __func__, ndp->ni_pathlen, fpl->debug.ni_pathlen, fpl->nulchar,
                    cnp->cn_nameptr, cnp->cn_nameptr, cnp->cn_pnbuf);
        }
#endif
        return (0);
}
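
/*
 * Illustrative sketch, not compiled: for a lookup of "a/b/c" punting at
 * "b", the state handed over to the locked lookup is roughly:
 *
 *      ndp->ni_startdir = <referenced vnode for "a">;
 *      cnp->cn_nameptr  = "b/c";
 *      ndp->ni_pathlen  = sizeof("b/c");
 *
 * i.e. the same state namei would have computed had it walked the already
 * resolved prefix itself.
 */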

static int
cache_fplookup_final_child(struct cache_fpl *fpl, enum vgetstate tvs)
{
        struct componentname *cnp;
        struct vnode *tvp;
        seqc_t tvp_seqc;
        int error, lkflags;

        cnp = fpl->cnp;
        tvp = fpl->tvp;
        tvp_seqc = fpl->tvp_seqc;

        if ((cnp->cn_flags & LOCKLEAF) != 0) {
                lkflags = LK_SHARED;
                if ((cnp->cn_flags & LOCKSHARED) == 0)
                        lkflags = LK_EXCLUSIVE;
                error = vget_finish(tvp, lkflags, tvs);
                if (__predict_false(error != 0)) {
                        return (cache_fpl_aborted(fpl));
                }
        } else {
                vget_finish_ref(tvp, tvs);
        }

        if (!vn_seqc_consistent(tvp, tvp_seqc)) {
                if ((cnp->cn_flags & LOCKLEAF) != 0)
                        vput(tvp);
                else
                        vrele(tvp);
                return (cache_fpl_aborted(fpl));
        }

        return (cache_fpl_handled(fpl));
}

/*
 * The caller may want to modify the state of the namecache.
 */
static int __noinline
cache_fplookup_final_modifying(struct cache_fpl *fpl)
{
        struct nameidata *ndp;
        struct componentname *cnp;
        enum vgetstate dvs;
        struct vnode *dvp, *tvp;
        struct mount *mp;
        seqc_t dvp_seqc;
        int error;
        bool docache;

        ndp = fpl->ndp;
        cnp = fpl->cnp;
        dvp = fpl->dvp;
        dvp_seqc = fpl->dvp_seqc;

        MPASS(*(cnp->cn_nameptr) != '/');
        MPASS(cache_fpl_islastcn(ndp));
        if ((cnp->cn_flags & LOCKPARENT) == 0)
                MPASS((cnp->cn_flags & WANTPARENT) != 0);
        MPASS((cnp->cn_flags & TRAILINGSLASH) == 0);
        MPASS(cnp->cn_nameiop == CREATE || cnp->cn_nameiop == DELETE ||
            cnp->cn_nameiop == RENAME);
        MPASS((cnp->cn_flags & MAKEENTRY) == 0);
        MPASS((cnp->cn_flags & ISDOTDOT) == 0);

        docache = (cnp->cn_flags & NOCACHE) ^ NOCACHE;
        if (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)
                docache = false;

        mp = atomic_load_ptr(&dvp->v_mount);
        if (__predict_false(mp == NULL)) {
                return (cache_fpl_aborted(fpl));
        }

        if (__predict_false(mp->mnt_flag & MNT_RDONLY)) {
                cache_fpl_smr_exit(fpl);
                /*
                 * The original code does not check for CREATE here, which
                 * may be a bug. For now let the old lookup decide.
                 */
                if (cnp->cn_nameiop == CREATE) {
                        return (cache_fpl_aborted(fpl));
                }
                return (cache_fpl_handled_error(fpl, EROFS));
        }

        if (fpl->tvp != NULL && (cnp->cn_flags & FAILIFEXISTS) != 0) {
                cache_fpl_smr_exit(fpl);
                return (cache_fpl_handled_error(fpl, EEXIST));
        }

        /*
         * Secure access to dvp; check cache_fplookup_partial_setup for
         * reasoning.
         *
         * XXX At least UFS requires its lookup routine to be called for
         * the last path component, which leads to some level of complication
         * and inefficiency:
         * - the target routine always locks the target vnode, but our caller
         *   may not need it locked
         * - some of the VOP machinery asserts that the parent is locked, which
         *   once more may not be required
         *
         * TODO: add a flag for filesystems which don't need this.
         */
        dvs = vget_prep_smr(dvp);
        cache_fpl_smr_exit(fpl);
        if (__predict_false(dvs == VGET_NONE)) {
                return (cache_fpl_aborted(fpl));
        }

        vget_finish_ref(dvp, dvs);
        if (!vn_seqc_consistent(dvp, dvp_seqc)) {
                vrele(dvp);
                return (cache_fpl_aborted(fpl));
        }

        error = vn_lock(dvp, LK_EXCLUSIVE);
        if (__predict_false(error != 0)) {
                vrele(dvp);
                return (cache_fpl_aborted(fpl));
        }

        tvp = NULL;
        cnp->cn_flags |= ISLASTCN;
        if (docache)
                cnp->cn_flags |= MAKEENTRY;
        if (cache_fpl_isdotdot(cnp))
                cnp->cn_flags |= ISDOTDOT;
        cnp->cn_lkflags = LK_EXCLUSIVE;
        error = VOP_LOOKUP(dvp, &tvp, cnp);
        switch (error) {
        case EJUSTRETURN:
        case 0:
                break;
        case ENOTDIR:
        case ENOENT:
                vput(dvp);
                return (cache_fpl_handled_error(fpl, error));
        default:
                vput(dvp);
                return (cache_fpl_aborted(fpl));
        }

        fpl->tvp = tvp;
        fpl->savename = (cnp->cn_flags & SAVENAME) != 0;

        if (tvp == NULL) {
                if ((cnp->cn_flags & SAVESTART) != 0) {
                        ndp->ni_startdir = dvp;
                        vrefact(ndp->ni_startdir);
                        cnp->cn_flags |= SAVENAME;
                        fpl->savename = true;
                }
                MPASS(error == EJUSTRETURN);
                if ((cnp->cn_flags & LOCKPARENT) == 0) {
                        VOP_UNLOCK(dvp);
                }
                return (cache_fpl_handled(fpl));
        }

        /*
         * There are very hairy corner cases concerning various flag combinations
         * and locking state. In particular here we only hold one lock instead of
         * two.
         *
         * Skip the complexity as it is of no significance for normal workloads.
         */
        if (__predict_false(tvp == dvp)) {
                vput(dvp);
                vrele(tvp);
                return (cache_fpl_aborted(fpl));
        }

        /*
         * Check if the target is either a symlink or a mount point.
         * Since we expect this to be the terminal vnode it should
         * almost never be true.
         */
        if (__predict_false(tvp->v_type == VLNK || cache_fplookup_is_mp(fpl))) {
                vput(dvp);
                vput(tvp);
                return (cache_fpl_aborted(fpl));
        }

        if ((cnp->cn_flags & FAILIFEXISTS) != 0) {
                vput(dvp);
                vput(tvp);
                return (cache_fpl_handled_error(fpl, EEXIST));
        }

        if ((cnp->cn_flags & LOCKLEAF) == 0) {
                VOP_UNLOCK(tvp);
        }

        if ((cnp->cn_flags & LOCKPARENT) == 0) {
                VOP_UNLOCK(dvp);
        }

        if ((cnp->cn_flags & SAVESTART) != 0) {
                ndp->ni_startdir = dvp;
                vrefact(ndp->ni_startdir);
                cnp->cn_flags |= SAVENAME;
                fpl->savename = true;
        }

        return (cache_fpl_handled(fpl));
}
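
/*
 * Illustrative note: FAILIFEXISTS handled above is how O_CREAT | O_EXCL
 * surfaces in the lookup layer. For example:
 *
 *      open("/tmp/lockfile", O_CREAT | O_EXCL | O_WRONLY, 0600);
 *
 * performs a CREATE lookup with the flag set and collects the EEXIST
 * returned above if the name is already present.
 */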

static int __noinline
cache_fplookup_modifying(struct cache_fpl *fpl)
{
        struct nameidata *ndp;

        ndp = fpl->ndp;

        if (!cache_fpl_islastcn(ndp)) {
                return (cache_fpl_partial(fpl));
        }
        return (cache_fplookup_final_modifying(fpl));
}

static int __noinline
cache_fplookup_final_withparent(struct cache_fpl *fpl)
{
        struct componentname *cnp;
        enum vgetstate dvs, tvs;
        struct vnode *dvp, *tvp;
        seqc_t dvp_seqc;
        int error;

        cnp = fpl->cnp;
        dvp = fpl->dvp;
        dvp_seqc = fpl->dvp_seqc;
        tvp = fpl->tvp;

        MPASS((cnp->cn_flags & (LOCKPARENT|WANTPARENT)) != 0);

        /*
         * For simplicity this is less efficient than it could be.
         */
        dvs = vget_prep_smr(dvp);
        if (__predict_false(dvs == VGET_NONE)) {
                return (cache_fpl_aborted(fpl));
        }
        tvs = vget_prep_smr(tvp);
        if (__predict_false(tvs == VGET_NONE)) {
                cache_fpl_smr_exit(fpl);
                vget_abort(dvp, dvs);
                return (cache_fpl_aborted(fpl));
        }

        cache_fpl_smr_exit(fpl);

        if ((cnp->cn_flags & LOCKPARENT) != 0) {
                error = vget_finish(dvp, LK_EXCLUSIVE, dvs);
                if (__predict_false(error != 0)) {
                        vget_abort(tvp, tvs);
                        return (cache_fpl_aborted(fpl));
                }
        } else {
                vget_finish_ref(dvp, dvs);
        }

        if (!vn_seqc_consistent(dvp, dvp_seqc)) {
                vget_abort(tvp, tvs);
                if ((cnp->cn_flags & LOCKPARENT) != 0)
                        vput(dvp);
                else
                        vrele(dvp);
                return (cache_fpl_aborted(fpl));
        }

        error = cache_fplookup_final_child(fpl, tvs);
        if (__predict_false(error != 0)) {
                MPASS(fpl->status == CACHE_FPL_STATUS_ABORTED);
                if ((cnp->cn_flags & LOCKPARENT) != 0)
                        vput(dvp);
                else
                        vrele(dvp);
                return (error);
        }

        MPASS(fpl->status == CACHE_FPL_STATUS_HANDLED);
        return (0);
}

static int
cache_fplookup_final(struct cache_fpl *fpl)
{
        struct componentname *cnp;
        enum vgetstate tvs;
        struct vnode *dvp, *tvp;
        seqc_t dvp_seqc;

        cnp = fpl->cnp;
        dvp = fpl->dvp;
        dvp_seqc = fpl->dvp_seqc;
        tvp = fpl->tvp;

        MPASS(*(cnp->cn_nameptr) != '/');

        if (cnp->cn_nameiop != LOOKUP) {
                return (cache_fplookup_final_modifying(fpl));
        }

        if ((cnp->cn_flags & (LOCKPARENT|WANTPARENT)) != 0)
                return (cache_fplookup_final_withparent(fpl));

        tvs = vget_prep_smr(tvp);
        if (__predict_false(tvs == VGET_NONE)) {
                return (cache_fpl_partial(fpl));
        }

        if (!vn_seqc_consistent(dvp, dvp_seqc)) {
                cache_fpl_smr_exit(fpl);
                vget_abort(tvp, tvs);
                return (cache_fpl_aborted(fpl));
        }

        cache_fpl_smr_exit(fpl);
        return (cache_fplookup_final_child(fpl, tvs));
}

/*
 * Comment from locked lookup:
 * Check for degenerate name (e.g. / or "") which is a way of talking about a
 * directory, e.g. like "/." or ".".
 */
static int __noinline
cache_fplookup_degenerate(struct cache_fpl *fpl)
{
        struct componentname *cnp;
        struct vnode *dvp;
        enum vgetstate dvs;
        int error, lkflags;

        fpl->tvp = fpl->dvp;
        fpl->tvp_seqc = fpl->dvp_seqc;

        cnp = fpl->cnp;
        dvp = fpl->dvp;

        if (__predict_false(cnp->cn_nameiop != LOOKUP)) {
                cache_fpl_smr_exit(fpl);
                return (cache_fpl_handled_error(fpl, EISDIR));
        }

        MPASS((cnp->cn_flags & SAVESTART) == 0);

        if ((cnp->cn_flags & (LOCKPARENT|WANTPARENT)) != 0) {
                return (cache_fplookup_final_withparent(fpl));
        }

        dvs = vget_prep_smr(dvp);
        cache_fpl_smr_exit(fpl);
        if (__predict_false(dvs == VGET_NONE)) {
                return (cache_fpl_aborted(fpl));
        }

        if ((cnp->cn_flags & LOCKLEAF) != 0) {
                lkflags = LK_SHARED;
                if ((cnp->cn_flags & LOCKSHARED) == 0)
                        lkflags = LK_EXCLUSIVE;
                error = vget_finish(dvp, lkflags, dvs);
                if (__predict_false(error != 0)) {
                        return (cache_fpl_aborted(fpl));
                }
        } else {
                vget_finish_ref(dvp, dvs);
        }
        return (cache_fpl_handled(fpl));
}
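
/*
 * Illustrative note: a degenerate lookup is one whose entire path boils
 * down to the starting directory, the canonical example being stat("/").
 * The directory itself is the result; operations other than plain LOOKUP
 * get the EISDIR above, matching the locked lookup behavior.
 */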

static int __noinline
cache_fplookup_noentry(struct cache_fpl *fpl)
{
        struct nameidata *ndp;
        struct componentname *cnp;
        enum vgetstate dvs;
        struct vnode *dvp, *tvp;
        seqc_t dvp_seqc;
        int error;
        bool docache;

        ndp = fpl->ndp;
        cnp = fpl->cnp;
        dvp = fpl->dvp;
        dvp_seqc = fpl->dvp_seqc;

        MPASS(*(cnp->cn_nameptr) != '/');
        MPASS((cnp->cn_flags & MAKEENTRY) == 0);
        MPASS((cnp->cn_flags & ISDOTDOT) == 0);
        MPASS(!cache_fpl_isdotdot(cnp));

        /*
         * Hack: delayed name len checking.
         */
        if (__predict_false(cnp->cn_namelen > NAME_MAX)) {
                cache_fpl_smr_exit(fpl);
                return (cache_fpl_handled_error(fpl, ENAMETOOLONG));
        }

        if (cnp->cn_nameiop != LOOKUP) {
                fpl->tvp = NULL;
                return (cache_fplookup_modifying(fpl));
        }

        MPASS((cnp->cn_flags & SAVESTART) == 0);

        /*
         * Only try to fill in the component if it is the last one;
         * otherwise not only may there be several to handle, but the
         * walk may also get complicated.
         */
        if (!cache_fpl_islastcn(ndp)) {
                return (cache_fpl_partial(fpl));
        }

        /*
         * Secure access to dvp; check cache_fplookup_partial_setup for
         * reasoning.
         */
        dvs = vget_prep_smr(dvp);
        cache_fpl_smr_exit(fpl);
        if (__predict_false(dvs == VGET_NONE)) {
                return (cache_fpl_aborted(fpl));
        }

        vget_finish_ref(dvp, dvs);
        if (!vn_seqc_consistent(dvp, dvp_seqc)) {
                vrele(dvp);
                return (cache_fpl_aborted(fpl));
        }

        error = vn_lock(dvp, LK_SHARED);
        if (__predict_false(error != 0)) {
                vrele(dvp);
                return (cache_fpl_aborted(fpl));
        }

        tvp = NULL;
        /*
         * TODO: provide variants which don't require locking either vnode.
         */
        cnp->cn_flags |= ISLASTCN;
        docache = (cnp->cn_flags & NOCACHE) ^ NOCACHE;
        if (docache)
                cnp->cn_flags |= MAKEENTRY;
        cnp->cn_lkflags = LK_SHARED;
        if ((cnp->cn_flags & LOCKSHARED) == 0) {
                cnp->cn_lkflags = LK_EXCLUSIVE;
        }
        error = VOP_LOOKUP(dvp, &tvp, cnp);
        switch (error) {
        case EJUSTRETURN:
        case 0:
                break;
        case ENOTDIR:
        case ENOENT:
                vput(dvp);
                return (cache_fpl_handled_error(fpl, error));
        default:
                vput(dvp);
                return (cache_fpl_aborted(fpl));
        }

        fpl->tvp = tvp;
        if (!fpl->savename) {
                MPASS((cnp->cn_flags & SAVENAME) == 0);
        }

        if (tvp == NULL) {
                MPASS(error == EJUSTRETURN);
                if ((cnp->cn_flags & (WANTPARENT | LOCKPARENT)) == 0) {
                        vput(dvp);
                } else if ((cnp->cn_flags & LOCKPARENT) == 0) {
                        VOP_UNLOCK(dvp);
                }
                return (cache_fpl_handled(fpl));
        }

        if (__predict_false(tvp->v_type == VLNK || cache_fplookup_is_mp(fpl))) {
                vput(dvp);
                vput(tvp);
                return (cache_fpl_aborted(fpl));
        }

        if ((cnp->cn_flags & LOCKLEAF) == 0) {
                VOP_UNLOCK(tvp);
        }

        if ((cnp->cn_flags & (WANTPARENT | LOCKPARENT)) == 0) {
                vput(dvp);
        } else if ((cnp->cn_flags & LOCKPARENT) == 0) {
                VOP_UNLOCK(dvp);
        }
        return (cache_fpl_handled(fpl));
}

static int __noinline
cache_fplookup_dot(struct cache_fpl *fpl)
{
        int error;

        MPASS(!seqc_in_modify(fpl->dvp_seqc));
        /*
         * Just re-assign the value. seqc will be checked later for the first
         * non-dot path component in line and/or before deciding to return the
         * vnode.
         */
        fpl->tvp = fpl->dvp;
        fpl->tvp_seqc = fpl->dvp_seqc;

        counter_u64_add(dothits, 1);
        SDT_PROBE3(vfs, namecache, lookup, hit, fpl->dvp, ".", fpl->dvp);

        error = 0;
        if (cache_fplookup_is_mp(fpl)) {
                error = cache_fplookup_cross_mount(fpl);
        }
        return (error);
}

static int __noinline
cache_fplookup_dotdot(struct cache_fpl *fpl)
{
        struct nameidata *ndp;
        struct componentname *cnp;
        struct namecache *ncp;
        struct vnode *dvp;
        struct prison *pr;
        u_char nc_flag;

        ndp = fpl->ndp;
        cnp = fpl->cnp;
        dvp = fpl->dvp;

        MPASS(cache_fpl_isdotdot(cnp));

        /*
         * XXX this is racy the same way regular lookup is
         */
        for (pr = cnp->cn_cred->cr_prison; pr != NULL;
            pr = pr->pr_parent)
                if (dvp == pr->pr_root)
                        break;

        if (dvp == ndp->ni_rootdir ||
            dvp == ndp->ni_topdir ||
            dvp == rootvnode ||
            pr != NULL) {
                fpl->tvp = dvp;
                fpl->tvp_seqc = vn_seqc_read_any(dvp);
                if (seqc_in_modify(fpl->tvp_seqc)) {
                        return (cache_fpl_aborted(fpl));
                }
                return (0);
        }

        if ((dvp->v_vflag & VV_ROOT) != 0) {
                /*
                 * TODO
                 * The opposite of climb mount is needed here.
                 */
                return (cache_fpl_aborted(fpl));
        }

        ncp = atomic_load_ptr(&dvp->v_cache_dd);
        if (ncp == NULL) {
                return (cache_fpl_aborted(fpl));
        }

        nc_flag = atomic_load_char(&ncp->nc_flag);
        if ((nc_flag & NCF_ISDOTDOT) != 0) {
                if ((nc_flag & NCF_NEGATIVE) != 0)
                        return (cache_fpl_aborted(fpl));
                fpl->tvp = ncp->nc_vp;
        } else {
                fpl->tvp = ncp->nc_dvp;
        }

        if (!cache_ncp_canuse(ncp)) {
                return (cache_fpl_aborted(fpl));
        }

        fpl->tvp_seqc = vn_seqc_read_any(fpl->tvp);
        if (seqc_in_modify(fpl->tvp_seqc)) {
                return (cache_fpl_partial(fpl));
        }

        counter_u64_add(dotdothits, 1);
        return (0);
}
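
/*
 * Illustrative note: the prison and root directory checks above clamp ".."
 * at every root the process is confined to. E.g. for a process jailed or
 * chrooted into /j, looking up "../.." from /j resolves both components
 * back to /j itself rather than escaping, same as the locked lookup.
 */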

static int __noinline
cache_fplookup_neg(struct cache_fpl *fpl, struct namecache *ncp, uint32_t hash)
{
        u_char nc_flag;
        bool neg_promote;

        nc_flag = atomic_load_char(&ncp->nc_flag);
        MPASS((nc_flag & NCF_NEGATIVE) != 0);
        /*
         * If the caller wants to create an entry, we need to replace
         * this one.
         */
        if (__predict_false(fpl->cnp->cn_nameiop != LOOKUP)) {
                fpl->tvp = NULL;
                return (cache_fplookup_modifying(fpl));
        }
        neg_promote = cache_neg_hit_prep(ncp);
        if (!cache_fpl_neg_ncp_canuse(ncp)) {
                cache_neg_hit_abort(ncp);
                return (cache_fpl_partial(fpl));
        }
        if (neg_promote) {
                return (cache_fplookup_negative_promote(fpl, ncp, hash));
        }
        cache_neg_hit_finish(ncp);
        cache_fpl_smr_exit(fpl);
        return (cache_fpl_handled_error(fpl, ENOENT));
}

/*
 * Resolve a symlink. Called by filesystem-specific routines.
 *
 * Code flow is:
 * ... -> cache_fplookup_symlink -> VOP_FPLOOKUP_SYMLINK -> cache_symlink_resolve
 */
int
cache_symlink_resolve(struct cache_fpl *fpl, const char *string, size_t len)
{
        struct nameidata *ndp;
        struct componentname *cnp;

        ndp = fpl->ndp;
        cnp = fpl->cnp;

        if (__predict_false(len == 0)) {
                return (ENOENT);
        }

        ndp->ni_pathlen = fpl->nulchar - cnp->cn_nameptr - cnp->cn_namelen + 1;
#ifdef INVARIANTS
        if (ndp->ni_pathlen != fpl->debug.ni_pathlen) {
                panic("%s: mismatch (%zu != %zu) nulchar %p nameptr %p [%s] ; full string [%s]\n",
                    __func__, ndp->ni_pathlen, fpl->debug.ni_pathlen, fpl->nulchar,
                    cnp->cn_nameptr, cnp->cn_nameptr, cnp->cn_pnbuf);
        }
#endif

        if (__predict_false(len + ndp->ni_pathlen > MAXPATHLEN)) {
                return (ENAMETOOLONG);
        }

        if (__predict_false(ndp->ni_loopcnt++ >= MAXSYMLINKS)) {
                return (ELOOP);
        }

        if (ndp->ni_pathlen > 1) {
                bcopy(ndp->ni_next, cnp->cn_pnbuf + len, ndp->ni_pathlen);
        } else {
                cnp->cn_pnbuf[len] = '\0';
        }
        bcopy(string, cnp->cn_pnbuf, len);

        ndp->ni_pathlen += len;
        cache_fpl_pathlen_add(fpl, len);
        cnp->cn_nameptr = cnp->cn_pnbuf;
        fpl->nulchar = &cnp->cn_nameptr[ndp->ni_pathlen - 1];

        return (0);
}
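
/*
 * Illustrative sketch, not compiled: for a lookup of "a/lnk/c" where
 * "lnk" is a symlink to "x/y", the splicing above rewrites cn_pnbuf from
 *
 *      "a/lnk/c"       (cn_nameptr at "lnk", ni_next at "/c")
 * to
 *      "x/y/c"         (cn_nameptr back at the start of the buffer)
 *
 * by first shifting the remaining suffix to just past the link target
 * length and then copying the target to the front, with the MAXPATHLEN
 * and MAXSYMLINKS limits enforced beforehand.
 */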

static int __noinline
cache_fplookup_symlink(struct cache_fpl *fpl)
{
        struct nameidata *ndp;
        struct componentname *cnp;
        struct vnode *dvp, *tvp;
        int error;

        ndp = fpl->ndp;
        cnp = fpl->cnp;
        dvp = fpl->dvp;
        tvp = fpl->tvp;

        if (cache_fpl_islastcn(ndp)) {
                if ((cnp->cn_flags & FOLLOW) == 0) {
                        return (cache_fplookup_final(fpl));
                }
        }

        error = VOP_FPLOOKUP_SYMLINK(tvp, fpl);
        if (__predict_false(error != 0)) {
                switch (error) {
                case EAGAIN:
                        return (cache_fpl_partial(fpl));
                case ENOENT:
                case ENAMETOOLONG:
                case ELOOP:
                        cache_fpl_smr_exit(fpl);
                        return (cache_fpl_handled_error(fpl, error));
                default:
                        return (cache_fpl_aborted(fpl));
                }
        }

        if (*(cnp->cn_nameptr) == '/') {
                fpl->dvp = cache_fpl_handle_root(fpl);
                fpl->dvp_seqc = vn_seqc_read_any(fpl->dvp);
                if (seqc_in_modify(fpl->dvp_seqc)) {
                        return (cache_fpl_aborted(fpl));
                }
        }

        return (cache_fplookup_preparse(fpl));
}

static int
cache_fplookup_next(struct cache_fpl *fpl)
{
        struct componentname *cnp;
        struct namecache *ncp;
        struct vnode *dvp, *tvp;
        u_char nc_flag;
        uint32_t hash;
        int error;

        cnp = fpl->cnp;
        dvp = fpl->dvp;

        if (__predict_false(cnp->cn_nameptr[0] == '.')) {
                if (cnp->cn_namelen == 1) {
                        return (cache_fplookup_dot(fpl));
                }
                if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
                        return (cache_fplookup_dotdot(fpl));
                }
        }

        MPASS(!cache_fpl_isdotdot(cnp));

        hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);

        CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
                if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
                    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
                        break;
        }

        if (__predict_false(ncp == NULL)) {
                if (cnp->cn_nameptr[0] == '/') {
                        return (cache_fplookup_skip_slashes(fpl));
                }
                return (cache_fplookup_noentry(fpl));
        }

        tvp = atomic_load_ptr(&ncp->nc_vp);
        nc_flag = atomic_load_char(&ncp->nc_flag);
        if ((nc_flag & NCF_NEGATIVE) != 0) {
                return (cache_fplookup_neg(fpl, ncp, hash));
        }

        if (!cache_ncp_canuse(ncp)) {
                return (cache_fpl_partial(fpl));
        }

        fpl->tvp = tvp;
        fpl->tvp_seqc = vn_seqc_read_any(tvp);
        if (seqc_in_modify(fpl->tvp_seqc)) {
                return (cache_fpl_partial(fpl));
        }

        counter_u64_add(numposhits, 1);
        SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name, tvp);

        error = 0;
        if (cache_fplookup_is_mp(fpl)) {
                error = cache_fplookup_cross_mount(fpl);
        }
        return (error);
}

static bool
cache_fplookup_mp_supported(struct mount *mp)
{

        MPASS(mp != NULL);
        if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) == 0)
                return (false);
        return (true);
}

/*
 * Walk up the mount stack (if any).
 *
 * Correctness is provided in the following ways:
 * - all vnodes are protected from freeing with SMR
 * - struct mount objects are type stable, making them always safe to access
 * - stability of the particular mount is provided by busying it
 * - relationship between the vnode which is mounted on and the mount is
 *   verified with the vnode sequence counter after busying
 * - association between root vnode of the mount and the mount is protected
 *   by busy
 *
 * From that point on we can read the sequence counter of the root vnode
 * and get the next mount on the stack (if any) using the same protection.
 *
 * By the end of a successful walk we are guaranteed the reached state was
 * indeed present at least at some point, which matches the regular lookup.
 */
static int __noinline
cache_fplookup_climb_mount(struct cache_fpl *fpl)
{
        struct mount *mp, *prev_mp;
        struct mount_pcpu *mpcpu, *prev_mpcpu;
        struct vnode *vp;
        seqc_t vp_seqc;

        vp = fpl->tvp;
        vp_seqc = fpl->tvp_seqc;

        VNPASS(vp->v_type == VDIR || vp->v_type == VBAD, vp);
        mp = atomic_load_ptr(&vp->v_mountedhere);
        if (__predict_false(mp == NULL)) {
                return (0);
        }

        prev_mp = NULL;
        for (;;) {
                if (!vfs_op_thread_enter_crit(mp, mpcpu)) {
                        if (prev_mp != NULL)
                                vfs_op_thread_exit_crit(prev_mp, prev_mpcpu);
                        return (cache_fpl_partial(fpl));
                }
                if (prev_mp != NULL)
                        vfs_op_thread_exit_crit(prev_mp, prev_mpcpu);
                if (!vn_seqc_consistent(vp, vp_seqc)) {
                        vfs_op_thread_exit_crit(mp, mpcpu);
                        return (cache_fpl_partial(fpl));
                }
                if (!cache_fplookup_mp_supported(mp)) {
                        vfs_op_thread_exit_crit(mp, mpcpu);
                        return (cache_fpl_partial(fpl));
                }
                vp = atomic_load_ptr(&mp->mnt_rootvnode);
                if (vp == NULL) {
                        vfs_op_thread_exit_crit(mp, mpcpu);
                        return (cache_fpl_partial(fpl));
                }
                vp_seqc = vn_seqc_read_any(vp);
                if (seqc_in_modify(vp_seqc)) {
                        vfs_op_thread_exit_crit(mp, mpcpu);
                        return (cache_fpl_partial(fpl));
                }
                prev_mp = mp;
                prev_mpcpu = mpcpu;
                mp = atomic_load_ptr(&vp->v_mountedhere);
                if (mp == NULL)
                        break;
        }

        vfs_op_thread_exit_crit(prev_mp, prev_mpcpu);
        fpl->tvp = vp;
        fpl->tvp_seqc = vp_seqc;
        return (0);
}
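
/*
 * Illustrative note: the loop above handles stacked mounts, e.g. after
 *
 *      mount -t tmpfs tmpfs /mnt
 *      mount -t tmpfs tmpfs /mnt
 *
 * the vnode covered by /mnt has two file systems mounted on top of it and
 * a lookup of "/mnt/foo" has to land in the root vnode of the topmost one.
 * Every hop is validated with busying and sequence counters as described
 * above; any inconsistency punts to the locked lookup.
 */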

static int __noinline
cache_fplookup_cross_mount(struct cache_fpl *fpl)
{
        struct mount *mp;
        struct mount_pcpu *mpcpu;
        struct vnode *vp;
        seqc_t vp_seqc;

        vp = fpl->tvp;
        vp_seqc = fpl->tvp_seqc;

        VNPASS(vp->v_type == VDIR || vp->v_type == VBAD, vp);
        mp = atomic_load_ptr(&vp->v_mountedhere);
        if (__predict_false(mp == NULL)) {
                return (0);
        }

        if (!vfs_op_thread_enter_crit(mp, mpcpu)) {
                return (cache_fpl_partial(fpl));
        }
        if (!vn_seqc_consistent(vp, vp_seqc)) {
                vfs_op_thread_exit_crit(mp, mpcpu);
                return (cache_fpl_partial(fpl));
        }
        if (!cache_fplookup_mp_supported(mp)) {
                vfs_op_thread_exit_crit(mp, mpcpu);
                return (cache_fpl_partial(fpl));
        }
        vp = atomic_load_ptr(&mp->mnt_rootvnode);
        if (__predict_false(vp == NULL)) {
                vfs_op_thread_exit_crit(mp, mpcpu);
                return (cache_fpl_partial(fpl));
        }
        vp_seqc = vn_seqc_read_any(vp);
        vfs_op_thread_exit_crit(mp, mpcpu);
        if (seqc_in_modify(vp_seqc)) {
                return (cache_fpl_partial(fpl));
        }
        mp = atomic_load_ptr(&vp->v_mountedhere);
        if (__predict_false(mp != NULL)) {
                /*
                 * There are possibly more mount points on top.
                 * Normally this does not happen, so for simplicity just
                 * start over.
                 */
                return (cache_fplookup_climb_mount(fpl));
        }

        fpl->tvp = vp;
        fpl->tvp_seqc = vp_seqc;
        return (0);
}

/*
 * Check if a vnode is mounted on.
 */
static bool
cache_fplookup_is_mp(struct cache_fpl *fpl)
{
        struct vnode *vp;

        vp = fpl->tvp;
        return ((vn_irflag_read(vp) & VIRF_MOUNTPOINT) != 0);
}

/*
 * Parse the path.
 *
 * The code was originally copy-pasted from regular lookup and despite
 * clean-ups still leaves performance on the table. Any modifications here
 * must take into account that in case of fallback the resulting nameidata
 * state has to be compatible with the original.
 */

/*
 * Debug ni_pathlen tracking.
 */
#ifdef INVARIANTS
static void
cache_fpl_pathlen_add(struct cache_fpl *fpl, size_t n)
{

        fpl->debug.ni_pathlen += n;
        KASSERT(fpl->debug.ni_pathlen <= PATH_MAX,
            ("%s: pathlen overflow to %zd\n", __func__, fpl->debug.ni_pathlen));
}

static void
cache_fpl_pathlen_sub(struct cache_fpl *fpl, size_t n)
{

        fpl->debug.ni_pathlen -= n;
        KASSERT(fpl->debug.ni_pathlen <= PATH_MAX,
            ("%s: pathlen underflow to %zd\n", __func__, fpl->debug.ni_pathlen));
}

static void
cache_fpl_pathlen_inc(struct cache_fpl *fpl)
{

        cache_fpl_pathlen_add(fpl, 1);
}

static void
cache_fpl_pathlen_dec(struct cache_fpl *fpl)
{

        cache_fpl_pathlen_sub(fpl, 1);
}
#else
static void
cache_fpl_pathlen_add(struct cache_fpl *fpl, size_t n)
{
}

static void
cache_fpl_pathlen_sub(struct cache_fpl *fpl, size_t n)
{
}

static void
cache_fpl_pathlen_inc(struct cache_fpl *fpl)
{
}

static void
cache_fpl_pathlen_dec(struct cache_fpl *fpl)
{
}
#endif

static int __always_inline
cache_fplookup_preparse(struct cache_fpl *fpl)
{
        struct componentname *cnp;

        cnp = fpl->cnp;

        if (__predict_false(cnp->cn_nameptr[0] == '\0')) {
                return (cache_fplookup_degenerate(fpl));
        }

        /*
         * By this point the shortest possible pathname is one character + nul
         * terminator, hence 2.
         */
        KASSERT(fpl->debug.ni_pathlen >= 2, ("%s: pathlen %zu\n", __func__,
            fpl->debug.ni_pathlen));
        KASSERT(&cnp->cn_nameptr[fpl->debug.ni_pathlen - 2] == fpl->nulchar - 1,
            ("%s: mismatch on string (%p != %p) [%s]\n", __func__,
            &cnp->cn_nameptr[fpl->debug.ni_pathlen - 2], fpl->nulchar - 1,
            cnp->cn_pnbuf));
        if (__predict_false(*(fpl->nulchar - 1) == '/')) {
                /*
                 * TODO
                 * Regular lookup performs the following:
                 * *ndp->ni_next = '\0';
                 * cnp->cn_flags |= TRAILINGSLASH;
                 *
                 * Which is problematic since it modifies data read
                 * from userspace. Then if fast path lookup was to
                 * abort we would have to either restore it or convey
                 * the flag. Since this is a corner case just ignore
                 * it for simplicity.
                 */
                return (cache_fpl_aborted(fpl));
        }
        return (0);
}

static int
cache_fplookup_parse(struct cache_fpl *fpl)
{
        struct nameidata *ndp;
        struct componentname *cnp;
        char *cp;

        ndp = fpl->ndp;
        cnp = fpl->cnp;

        /*
         * Find the end of this path component; it is either / or nul.
         *
         * Store / as a temporary sentinel so that we only have one character
         * to test for. Pathnames tend to be short, so this should not result
         * in cache misses.
         */
        KASSERT(&cnp->cn_nameptr[fpl->debug.ni_pathlen - 1] == fpl->nulchar,
            ("%s: mismatch between pathlen (%zu) and nulchar (%p != %p), string [%s]\n",
            __func__, fpl->debug.ni_pathlen, &cnp->cn_nameptr[fpl->debug.ni_pathlen - 1],
            fpl->nulchar, cnp->cn_pnbuf));
        KASSERT(*fpl->nulchar == '\0',
            ("%s: expected nul at %p; string [%s]\n", __func__, fpl->nulchar,
            cnp->cn_pnbuf));
        *fpl->nulchar = '/';
        for (cp = cnp->cn_nameptr; *cp != '/'; cp++) {
                KASSERT(*cp != '\0',
                    ("%s: encountered unexpected nul; string [%s]\n", __func__,
                    cnp->cn_nameptr));
                continue;
        }
        *fpl->nulchar = '\0';

        cnp->cn_namelen = cp - cnp->cn_nameptr;
        cache_fpl_pathlen_sub(fpl, cnp->cn_namelen);
        /*
         * Hack: we have to check if the found path component's length exceeds
         * NAME_MAX. However, the condition is very rarely true and the check
         * can be elided in the common case -- if an entry was found in the
         * cache, then it could not have been too long to begin with.
         */
        ndp->ni_next = cp;

#ifdef INVARIANTS
        /*
         * Code below is only here to assure compatibility with regular lookup.
         * It covers handling of trailing slashes and names like "/", both of
         * which can be taken care of upfront, which lockless lookup does in
         * cache_fplookup_preparse. Regular lookup performs these for each
         * path component.
         */
        while (*cp == '/' && (cp[1] == '/' || cp[1] == '\0')) {
                cp++;
                if (*cp == '\0') {
                        panic("%s: ran into TRAILINGSLASH handling from [%s]\n",
                            __func__, cnp->cn_pnbuf);
                }
        }

        if (cnp->cn_nameptr[0] == '\0') {
                panic("%s: ran into degenerate name from [%s]\n", __func__, cnp->cn_pnbuf);
        }
#endif
        return (0);
}
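
/*
 * Illustrative sketch, not compiled: parsing "foo/bar" works as follows.
 * With cn_nameptr at 'f' and fpl->nulchar at the terminating nul, the nul
 * is temporarily replaced with the sentinel:
 *
 *      "foo/bar/"      (scan only needs to test for '/')
 *          ^ cp stops here for the first component
 *      "foo/bar"       (sentinel removed afterwards)
 *
 * yielding cn_namelen = 3 and ni_next pointing at the real '/'. The last
 * component's scan terminates at the sentinel itself.
 */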

static void
cache_fplookup_parse_advance(struct cache_fpl *fpl)
{
        struct nameidata *ndp;
        struct componentname *cnp;

        ndp = fpl->ndp;
        cnp = fpl->cnp;

        cnp->cn_nameptr = ndp->ni_next;
        KASSERT(*(cnp->cn_nameptr) == '/',
            ("%s: should have seen slash at %p ; buf %p [%s]\n", __func__,
            cnp->cn_nameptr, cnp->cn_pnbuf, cnp->cn_pnbuf));
        cnp->cn_nameptr++;
        cache_fpl_pathlen_dec(fpl);
}

/*
 * Skip spurious slashes in a pathname (e.g., "foo///bar") and retry.
 *
 * Lockless lookup elides checking for spurious slashes; should they be
 * present, it is guaranteed to fail to find an entry. In this case the
 * caller must check if the name starts with a slash and call this routine.
 * It is going to fast forward across the spurious slashes and set the
 * state up for retry.
 */
static int __noinline
cache_fplookup_skip_slashes(struct cache_fpl *fpl)
{
        struct nameidata *ndp;
        struct componentname *cnp;

        ndp = fpl->ndp;
        cnp = fpl->cnp;

        MPASS(*(cnp->cn_nameptr) == '/');
        do {
                cnp->cn_nameptr++;
                cache_fpl_pathlen_dec(fpl);
        } while (*(cnp->cn_nameptr) == '/');

        /*
         * Go back to one slash so that cache_fplookup_parse_advance has
         * something to skip.
         */
        cnp->cn_nameptr--;
        cache_fpl_pathlen_inc(fpl);

        /*
         * cache_fplookup_parse_advance starts from ndp->ni_next
         */
        ndp->ni_next = cnp->cn_nameptr;

        /*
         * See cache_fplookup_dot.
         */
        fpl->tvp = fpl->dvp;
        fpl->tvp_seqc = fpl->dvp_seqc;

        return (0);
}
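
/*
 * Illustrative walkthrough: for "foo///bar" the parsed component "foo" is
 * followed by cn_nameptr landing on "//bar". The hash lookup is guaranteed
 * to miss (no entry has an empty name), the leading '/' is noticed and the
 * routine above fast forwards to "/bar", resetting ni_next so that the
 * regular parse_advance/parse sequence retries from "bar" with dvp
 * unchanged.
 */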

/*
 * See the API contract for VOP_FPLOOKUP_VEXEC.
 */
static int __noinline
cache_fplookup_failed_vexec(struct cache_fpl *fpl, int error)
{
        struct componentname *cnp;
        struct vnode *dvp;
        seqc_t dvp_seqc;

        cnp = fpl->cnp;
        dvp = fpl->dvp;
        dvp_seqc = fpl->dvp_seqc;

        /*
         * Hack: delayed name len checking.
         */
        if (__predict_false(cnp->cn_namelen > NAME_MAX)) {
                cache_fpl_smr_exit(fpl);
                return (cache_fpl_handled_error(fpl, ENAMETOOLONG));
        }

        /*
         * Hack: they may be looking up foo/bar, where foo is a
         * regular file. In such a case we need to return ENOTDIR,
         * but we may happen to get here with a different error.
         */
        if (dvp->v_type != VDIR) {
                /*
                 * The check here is predominantly to catch
                 * EOPNOTSUPP from dead_vnodeops. If the vnode
                 * gets doomed past this point it is going to
                 * fail seqc verification.
                 */
                if (VN_IS_DOOMED(dvp)) {
                        return (cache_fpl_aborted(fpl));
                }
                error = ENOTDIR;
        }

        /*
         * Hack: handle O_SEARCH.
         *
         * Open Group Base Specifications Issue 7, 2018 edition states:
         * <quote>
         * If the access mode of the open file description associated with the
         * file descriptor is not O_SEARCH, the function shall check whether
         * directory searches are permitted using the current permissions of
         * the directory underlying the file descriptor. If the access mode is
         * O_SEARCH, the function shall not perform the check.
         * </quote>
         *
         * Regular lookup tests for the NOEXECCHECK flag for every path
         * component to decide whether to do the permission check. However,
         * since most lookups never have the flag (and when they do it is only
         * present for the first path component), lockless lookup only acts on
         * it if there is a permission problem. Here the flag is represented
         * with a boolean so that we don't have to clear it on the way out.
         *
         * For simplicity this always aborts.
         * TODO: check if this is the first lookup and ignore the permission
         * problem. Note the flag has to survive fallback (if it happens to be
         * performed).
         */
        if (fpl->fsearch) {
                return (cache_fpl_aborted(fpl));
        }

        switch (error) {
        case EAGAIN:
                if (!vn_seqc_consistent(dvp, dvp_seqc)) {
                        error = cache_fpl_aborted(fpl);
                } else {
                        cache_fpl_partial(fpl);
                }
                break;
        default:
                if (!vn_seqc_consistent(dvp, dvp_seqc)) {
                        error = cache_fpl_aborted(fpl);
                } else {
                        cache_fpl_smr_exit(fpl);
                        cache_fpl_handled_error(fpl, error);
                }
                break;
        }
        return (error);
}
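
/*
 * Illustrative note: the fsearch handling above corresponds to e.g.
 *
 *      fd = open("/dir", O_SEARCH);
 *      fstatat(fd, "file", &sb, 0);
 *
 * where the descriptor grants searchability without requiring execute
 * permission on /dir at lookup time; for now the fast path conservatively
 * punts such lookups to the locked variant.
 */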
5497 * 5498 * Thus this translates to: 5499 * 5500 * vfs_smr_enter(); 5501 * dvp_seqc = seqc_read_any(dvp); 5502 * if (seqc_in_modify(dvp_seqc)) // someone is altering the vnode 5503 * abort(); 5504 * for (;;) { 5505 * tvp = find(); 5506 * tvp_seqc = seqc_read_any(tvp); 5507 * if (seqc_in_modify(tvp_seqc)) // someone is altering the target vnode 5508 * abort(); 5509 * if (!seqc_consistent(dvp, dvp_seqc) // someone is altering the vnode 5510 * abort(); 5511 * dvp = tvp; // we know nothing of importance has changed 5512 * dvp_seqc = tvp_seqc; // store the counter for the tvp iteration 5513 * if (last) 5514 * break; 5515 * } 5516 * vget(); // secure the vnode 5517 * if (!seqc_consistent(tvp, tvp_seqc) // final check 5518 * abort(); 5519 * // at this point we know nothing has changed for any parent<->child pair 5520 * // as they were crossed during the lookup, meaning we matched the guarantee 5521 * // of the locked variant 5522 * return (tvp); 5523 * 5524 * The API contract for VOP_FPLOOKUP_VEXEC routines is as follows: 5525 * - they are called while within vfs_smr protection which they must never exit 5526 * - EAGAIN can be returned to denote checking could not be performed, it is 5527 * always valid to return it 5528 * - if the sequence counter has not changed the result must be valid 5529 * - if the sequence counter has changed both false positives and false negatives 5530 * are permitted (since the result will be rejected later) 5531 * - for simple cases of unix permission checks vaccess_vexec_smr can be used 5532 * 5533 * Caveats to watch out for: 5534 * - vnodes are passed unlocked and unreferenced with nothing stopping 5535 * VOP_RECLAIM, in turn meaning that ->v_data can become NULL. It is advised 5536 * to use atomic_load_ptr to fetch it. 5537 * - the aforementioned object can also get freed, meaning absent other means it 5538 * should be protected with vfs_smr 5539 * - either safely checking permissions as they are modified or guaranteeing 5540 * their stability is left to the routine 5541 */ 5542 int 5543 cache_fplookup(struct nameidata *ndp, enum cache_fpl_status *status, 5544 struct pwd **pwdp) 5545 { 5546 struct cache_fpl fpl; 5547 struct pwd *pwd; 5548 struct vnode *dvp; 5549 struct componentname *cnp; 5550 int error; 5551 5552 fpl.status = CACHE_FPL_STATUS_UNSET; 5553 fpl.in_smr = false; 5554 fpl.ndp = ndp; 5555 fpl.cnp = cnp = &ndp->ni_cnd; 5556 MPASS(ndp->ni_lcf == 0); 5557 MPASS(curthread == cnp->cn_thread); 5558 KASSERT ((cnp->cn_flags & CACHE_FPL_INTERNAL_CN_FLAGS) == 0, 5559 ("%s: internal flags found in cn_flags %" PRIx64, __func__, 5560 cnp->cn_flags)); 5561 if ((cnp->cn_flags & SAVESTART) != 0) { 5562 MPASS(cnp->cn_nameiop != LOOKUP); 5563 } 5564 MPASS(cnp->cn_nameptr == cnp->cn_pnbuf); 5565 5566 if (__predict_false(!cache_can_fplookup(&fpl))) { 5567 *status = fpl.status; 5568 SDT_PROBE3(vfs, fplookup, lookup, done, ndp, fpl.line, fpl.status); 5569 return (EOPNOTSUPP); 5570 } 5571 5572 cache_fpl_checkpoint_outer(&fpl); 5573 5574 cache_fpl_smr_enter_initial(&fpl); 5575 #ifdef INVARIANTS 5576 fpl.debug.ni_pathlen = ndp->ni_pathlen; 5577 #endif 5578 fpl.nulchar = &cnp->cn_nameptr[ndp->ni_pathlen - 1]; 5579 fpl.fsearch = false; 5580 fpl.savename = (cnp->cn_flags & SAVENAME) != 0; 5581 fpl.pwd = pwdp; 5582 pwd = pwd_get_smr(); 5583 *(fpl.pwd) = pwd; 5584 ndp->ni_rootdir = pwd->pwd_rdir; 5585 ndp->ni_topdir = pwd->pwd_jdir; 5586 5587 if (cnp->cn_pnbuf[0] == '/') { 5588 dvp = cache_fpl_handle_root(&fpl); 5589 MPASS(ndp->ni_resflags == 0); 5590 ndp->ni_resflags = NIRES_ABS; 

int
cache_fplookup(struct nameidata *ndp, enum cache_fpl_status *status,
    struct pwd **pwdp)
{
        struct cache_fpl fpl;
        struct pwd *pwd;
        struct vnode *dvp;
        struct componentname *cnp;
        int error;

        fpl.status = CACHE_FPL_STATUS_UNSET;
        fpl.in_smr = false;
        fpl.ndp = ndp;
        fpl.cnp = cnp = &ndp->ni_cnd;
        MPASS(ndp->ni_lcf == 0);
        MPASS(curthread == cnp->cn_thread);
        KASSERT((cnp->cn_flags & CACHE_FPL_INTERNAL_CN_FLAGS) == 0,
            ("%s: internal flags found in cn_flags %" PRIx64, __func__,
            cnp->cn_flags));
        if ((cnp->cn_flags & SAVESTART) != 0) {
                MPASS(cnp->cn_nameiop != LOOKUP);
        }
        MPASS(cnp->cn_nameptr == cnp->cn_pnbuf);

        if (__predict_false(!cache_can_fplookup(&fpl))) {
                *status = fpl.status;
                SDT_PROBE3(vfs, fplookup, lookup, done, ndp, fpl.line, fpl.status);
                return (EOPNOTSUPP);
        }

        cache_fpl_checkpoint_outer(&fpl);

        cache_fpl_smr_enter_initial(&fpl);
#ifdef INVARIANTS
        fpl.debug.ni_pathlen = ndp->ni_pathlen;
#endif
        fpl.nulchar = &cnp->cn_nameptr[ndp->ni_pathlen - 1];
        fpl.fsearch = false;
        fpl.savename = (cnp->cn_flags & SAVENAME) != 0;
        fpl.pwd = pwdp;
        pwd = pwd_get_smr();
        *(fpl.pwd) = pwd;
        ndp->ni_rootdir = pwd->pwd_rdir;
        ndp->ni_topdir = pwd->pwd_jdir;

        if (cnp->cn_pnbuf[0] == '/') {
                dvp = cache_fpl_handle_root(&fpl);
                MPASS(ndp->ni_resflags == 0);
                ndp->ni_resflags = NIRES_ABS;
        } else {
                if (ndp->ni_dirfd == AT_FDCWD) {
                        dvp = pwd->pwd_cdir;
                } else {
                        error = cache_fplookup_dirfd(&fpl, &dvp);
                        if (__predict_false(error != 0)) {
                                goto out;
                        }
                }
        }

        SDT_PROBE4(vfs, namei, lookup, entry, dvp, cnp->cn_pnbuf, cnp->cn_flags, true);
        error = cache_fplookup_impl(dvp, &fpl);
out:
        cache_fpl_smr_assert_not_entered(&fpl);
        cache_fpl_assert_status(&fpl);
        *status = fpl.status;
        if (SDT_PROBES_ENABLED()) {
                SDT_PROBE3(vfs, fplookup, lookup, done, ndp, fpl.line, fpl.status);
                if (fpl.status == CACHE_FPL_STATUS_HANDLED)
                        SDT_PROBE4(vfs, namei, lookup, return, error, ndp->ni_vp, true,
                            ndp);
        }

        if (__predict_true(fpl.status == CACHE_FPL_STATUS_HANDLED)) {
                MPASS(error != CACHE_FPL_FAILED);
                if (error != 0) {
                        MPASS(fpl.dvp == NULL);
                        MPASS(fpl.tvp == NULL);
                        MPASS(fpl.savename == false);
                }
                ndp->ni_dvp = fpl.dvp;
                ndp->ni_vp = fpl.tvp;
                if (fpl.savename) {
                        cnp->cn_flags |= HASBUF;
                } else {
                        cache_fpl_cleanup_cnp(cnp);
                }
        }
        return (error);
}
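
/*
 * Illustrative sketch, not compiled: the expected calling pattern from
 * namei is roughly:
 *
 *      error = cache_fplookup(ndp, &status, &pwd);
 *      switch (status) {
 *      case CACHE_FPL_STATUS_HANDLED:
 *              return (error);         // fully resolved locklessly
 *      case CACHE_FPL_STATUS_PARTIAL:
 *              ...                     // resume locked lookup from ni_startdir
 *      case CACHE_FPL_STATUS_ABORTED:
 *              ...                     // redo the entire lookup locked
 *      }
 *
 * with EOPNOTSUPP (fast lookup not applicable at all) likewise routed to
 * the locked variant.
 */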