/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/uma.h>

SDT_PROVIDER_DECLARE(vfs);
SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *",
    "char *", "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int",
    "struct vnode *", "char *");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit__negative,
    "struct vnode *", "char *");
SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE1(vfs, namecache, purge, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *");
SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, zap_negative, done, "struct vnode *",
    "char *", "int");
SDT_PROBE_DEFINE3(vfs, namecache, shrink_negative, done, "struct vnode *",
    "char *", "int");

/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 */

struct namecache {
	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
	struct vnode *nc_dvp;		/* vnode of parent of name */
	union {
		struct vnode *nu_vp;	/* vnode the name refers to */
		u_int nu_neghits;	/* negative entry hits */
	} n_un;
	u_char	nc_flag;		/* flag bits */
	u_char	nc_nlen;		/* length of name */
	char	nc_name[0];		/* segment name + nul */
};
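/*
 * A note on sizing: nc_name is a zero-length array, so the name bytes are
 * stored directly behind the fixed part of the structure.  Entries come
 * from one of four UMA zones set up in nchinit() and selected in
 * cache_alloc() based on the name length and on whether timestamps are
 * requested:
 *
 *	sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1	(small)
 *	sizeof(struct namecache) + NAME_MAX + 1			(large)
 *
 * with struct namecache_ts taking the place of struct namecache in the
 * timestamped variants.
 */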
/*
 * struct namecache_ts repeats struct namecache layout up to the
 * nc_nlen member.
 * struct namecache_ts is used in place of struct namecache when time(s) need
 * to be stored.  The nc_dotdottime field is used when a cache entry is mapping
 * both a non-dotdot directory name plus dotdot for the directory's
 * parent.
 */
struct	namecache_ts {
	struct	timespec nc_time;	/* timespec provided by fs */
	struct	timespec nc_dotdottime;	/* dotdot timespec provided by fs */
	int	nc_ticks;		/* ticks value when entry was added */
	struct namecache nc_nc;
};

#define	nc_vp		n_un.nu_vp
#define	nc_neghits	n_un.nu_neghits

/*
 * Flags in namecache.nc_flag
 */
#define NCF_WHITE	0x01
#define NCF_ISDOTDOT	0x02
#define	NCF_TS		0x04
#define	NCF_DTS		0x08
#define	NCF_DVDROP	0x10
#define	NCF_NEGATIVE	0x20
#define	NCF_HOTNEGATIVE	0x40

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  The cache is indexed by a hash value
 * obtained from (vp, name), where vp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry, (i.e. for a name that is known NOT to
 * exist) the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 *
 * These locks are used (in the order in which they can be taken):
 * NAME		TYPE	ROLE
 * vnodelock	mtx	vnode lists and v_cache_dd field protection
 * bucketlock	rwlock	for access to given set of hash buckets
 * neglist	mtx	negative entry LRU management
 *
 * Additionally, ncneg_shrink_lock mtx is used to have at most one thread
 * shrinking the LRU list.
 *
 * It is legal to take multiple vnodelock and bucketlock locks.  The locking
 * order is lower address first.  Both are recursive.
 *
 * "." lookups are lockless.
 *
 * ".." and vnode -> name lookups require vnodelock.
 *
 * name -> vnode lookup requires the relevant bucketlock to be held for reading.
 *
 * Insertions and removals of entries require the involved vnode locks and
 * bucketlocks to be write-locked to prevent other threads from seeing the
 * entry.
 *
 * Some lookups result in removal of the found entry (e.g. getting rid of a
 * negative entry with the intent to create a positive one), which poses a
 * problem when multiple threads reach the same state.  Similarly, two
 * different threads can purge two different vnodes and try to remove the
 * same name.
 *
 * If the already held vnode lock is lower than the second required lock, we
 * can just take the other lock.  However, in the opposite case, this could
 * deadlock.  As such, this is resolved by trylocking and, if that fails,
 * unlocking the first node, locking everything in order and revalidating
 * the state.
 */
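/*
 * Illustrative sketch of the ordering rule described above, assuming vlp1
 * and vlp2 are the two vnode locks needed (either may be NULL):
 *
 *	cache_sort(&vlp1, &vlp2);	(lower address, and NULL, first)
 *	if (vlp1 != NULL)
 *		mtx_lock(vlp1);
 *	mtx_lock(vlp2);
 *
 * cache_trylock_vnodes() and cache_lock_vnodes_cel() below follow this
 * pattern; the former only trylocks because its callers already hold a
 * bucket lock, which comes later in the documented order.
 */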
/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash) \
	(&nchashtbl[(hash) & nchash])
static __read_mostly LIST_HEAD(nchashhead, namecache) *nchashtbl;/* Hash Table */
static u_long __read_mostly	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
    "Size of namecache hash table");
static u_long __read_mostly	ncnegfactor = 12;	/* ratio of negative entries */
SYSCTL_ULONG(_vfs, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
    "Ratio of negative namecache entries");
static u_long __exclusive_cache_line	numneg;	/* number of negative entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0,
    "Number of negative entries in namecache");
static u_long __exclusive_cache_line	numcache;/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0,
    "Number of namecache entries");
static u_long __exclusive_cache_line	numcachehv;/* number of cache entries with vnodes held */
SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0,
    "Number of namecache entries with vnodes held");
u_int __read_mostly	ncsizefactor = 2;
SYSCTL_UINT(_vfs, OID_AUTO, ncsizefactor, CTLFLAG_RW, &ncsizefactor, 0,
    "Size factor for namecache");
static u_int __read_mostly	ncpurgeminvnodes;
SYSCTL_UINT(_vfs, OID_AUTO, ncpurgeminvnodes, CTLFLAG_RW, &ncpurgeminvnodes, 0,
    "Number of vnodes below which purgevfs ignores the request");
static u_int __read_mostly	ncneghitsrequeue = 8;
SYSCTL_UINT(_vfs, OID_AUTO, ncneghitsrequeue, CTLFLAG_RW, &ncneghitsrequeue, 0,
    "Number of hits to requeue a negative entry in the LRU list");

struct nchstats	nchstats;		/* cache effectiveness statistics */

static struct mtx	ncneg_shrink_lock;
static int	shrink_list_turn;

struct neglist {
	struct mtx		nl_lock;
	TAILQ_HEAD(, namecache) nl_list;
} __aligned(CACHE_LINE_SIZE);

static struct neglist __read_mostly	*neglists;
static struct neglist ncneg_hot;

#define	numneglists	(ncneghash + 1)
static u_int __read_mostly	ncneghash;
static inline struct neglist *
NCP2NEGLIST(struct namecache *ncp)
{

	return (&neglists[(((uintptr_t)(ncp) >> 8) & ncneghash)]);
}

#define	numbucketlocks	(ncbuckethash + 1)
static u_int __read_mostly	ncbuckethash;
static struct rwlock_padalign __read_mostly	*bucketlocks;
#define	HASH2BUCKETLOCK(hash) \
	((struct rwlock *)(&bucketlocks[((hash) & ncbuckethash)]))

#define	numvnodelocks	(ncvnodehash + 1)
static u_int __read_mostly	ncvnodehash;
static struct mtx __read_mostly	*vnodelocks;
static inline struct mtx *
VP2VNODELOCK(struct vnode *vp)
{

	return (&vnodelocks[(((uintptr_t)(vp) >> 8) & ncvnodehash)]);
}

/*
 * UMA zones for the VFS cache.
 *
 * The small cache is used for entries with short names, which are the
 * most common.  The large cache is used for entries which are too big to
 * fit in the small cache.
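 *
 * A "small" entry can hold a name of up to CACHE_PATH_CUTOFF bytes, a
 * "large" one up to NAME_MAX bytes; each size comes in a plain and a
 * timestamped (_ts) flavour, giving the four zones created in nchinit().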
265 */ 266 static uma_zone_t __read_mostly cache_zone_small; 267 static uma_zone_t __read_mostly cache_zone_small_ts; 268 static uma_zone_t __read_mostly cache_zone_large; 269 static uma_zone_t __read_mostly cache_zone_large_ts; 270 271 #define CACHE_PATH_CUTOFF 35 272 273 static struct namecache * 274 cache_alloc(int len, int ts) 275 { 276 struct namecache_ts *ncp_ts; 277 struct namecache *ncp; 278 279 if (__predict_false(ts)) { 280 if (len <= CACHE_PATH_CUTOFF) 281 ncp_ts = uma_zalloc(cache_zone_small_ts, M_WAITOK); 282 else 283 ncp_ts = uma_zalloc(cache_zone_large_ts, M_WAITOK); 284 ncp = &ncp_ts->nc_nc; 285 } else { 286 if (len <= CACHE_PATH_CUTOFF) 287 ncp = uma_zalloc(cache_zone_small, M_WAITOK); 288 else 289 ncp = uma_zalloc(cache_zone_large, M_WAITOK); 290 } 291 return (ncp); 292 } 293 294 static void 295 cache_free(struct namecache *ncp) 296 { 297 struct namecache_ts *ncp_ts; 298 299 if (ncp == NULL) 300 return; 301 if ((ncp->nc_flag & NCF_DVDROP) != 0) 302 vdrop(ncp->nc_dvp); 303 if (__predict_false(ncp->nc_flag & NCF_TS)) { 304 ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc); 305 if (ncp->nc_nlen <= CACHE_PATH_CUTOFF) 306 uma_zfree(cache_zone_small_ts, ncp_ts); 307 else 308 uma_zfree(cache_zone_large_ts, ncp_ts); 309 } else { 310 if (ncp->nc_nlen <= CACHE_PATH_CUTOFF) 311 uma_zfree(cache_zone_small, ncp); 312 else 313 uma_zfree(cache_zone_large, ncp); 314 } 315 } 316 317 static void 318 cache_out_ts(struct namecache *ncp, struct timespec *tsp, int *ticksp) 319 { 320 struct namecache_ts *ncp_ts; 321 322 KASSERT((ncp->nc_flag & NCF_TS) != 0 || 323 (tsp == NULL && ticksp == NULL), 324 ("No NCF_TS")); 325 326 if (tsp == NULL && ticksp == NULL) 327 return; 328 329 ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc); 330 if (tsp != NULL) 331 *tsp = ncp_ts->nc_time; 332 if (ticksp != NULL) 333 *ticksp = ncp_ts->nc_ticks; 334 } 335 336 static int __read_mostly doingcache = 1; /* 1 => enable the cache */ 337 SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, 338 "VFS namecache enabled"); 339 340 /* Export size information to userland */ 341 SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, 342 sizeof(struct namecache), "sizeof(struct namecache)"); 343 344 /* 345 * The new name cache statistics 346 */ 347 static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, 348 "Name cache statistics"); 349 #define STATNODE_ULONG(name, descr) \ 350 SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr); 351 #define STATNODE_COUNTER(name, descr) \ 352 static counter_u64_t __read_mostly name; \ 353 SYSCTL_COUNTER_U64(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, descr); 354 STATNODE_ULONG(numneg, "Number of negative cache entries"); 355 STATNODE_ULONG(numcache, "Number of cache entries"); 356 STATNODE_COUNTER(numcalls, "Number of cache lookups"); 357 STATNODE_COUNTER(dothits, "Number of '.' hits"); 358 STATNODE_COUNTER(dotdothits, "Number of '..' hits"); 359 STATNODE_COUNTER(numchecks, "Number of checks in lookup"); 360 STATNODE_COUNTER(nummiss, "Number of cache misses"); 361 STATNODE_COUNTER(nummisszap, "Number of cache misses we do not want to cache"); 362 STATNODE_COUNTER(numposzaps, 363 "Number of cache hits (positive) we do not want to cache"); 364 STATNODE_COUNTER(numposhits, "Number of cache hits (positive)"); 365 STATNODE_COUNTER(numnegzaps, 366 "Number of cache hits (negative) we do not want to cache"); 367 STATNODE_COUNTER(numneghits, "Number of cache hits (negative)"); 368 /* These count for kern___getcwd(), too. 
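 *
 * These are counter(9) per-CPU counters; sysctl_nchstats() below folds a
 * subset of them into the legacy struct nchstats snapshot using
 * counter_u64_fetch().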
*/ 369 STATNODE_COUNTER(numfullpathcalls, "Number of fullpath search calls"); 370 STATNODE_COUNTER(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)"); 371 STATNODE_COUNTER(numfullpathfail2, 372 "Number of fullpath search errors (VOP_VPTOCNP failures)"); 373 STATNODE_COUNTER(numfullpathfail4, "Number of fullpath search errors (ENOMEM)"); 374 STATNODE_COUNTER(numfullpathfound, "Number of successful fullpath calls"); 375 static long zap_and_exit_bucket_fail; STATNODE_ULONG(zap_and_exit_bucket_fail, 376 "Number of times zap_and_exit failed to lock"); 377 static long cache_lock_vnodes_cel_3_failures; 378 STATNODE_ULONG(cache_lock_vnodes_cel_3_failures, 379 "Number of times 3-way vnode locking failed"); 380 381 static void cache_zap_locked(struct namecache *ncp, bool neg_locked); 382 static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir, 383 char *buf, char **retbuf, u_int buflen); 384 385 static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries"); 386 387 static int cache_yield; 388 SYSCTL_INT(_vfs_cache, OID_AUTO, yield, CTLFLAG_RD, &cache_yield, 0, 389 "Number of times cache called yield"); 390 391 static void 392 cache_maybe_yield(void) 393 { 394 395 if (should_yield()) { 396 cache_yield++; 397 kern_yield(PRI_USER); 398 } 399 } 400 401 static inline void 402 cache_assert_vlp_locked(struct mtx *vlp) 403 { 404 405 if (vlp != NULL) 406 mtx_assert(vlp, MA_OWNED); 407 } 408 409 static inline void 410 cache_assert_vnode_locked(struct vnode *vp) 411 { 412 struct mtx *vlp; 413 414 vlp = VP2VNODELOCK(vp); 415 cache_assert_vlp_locked(vlp); 416 } 417 418 static uint32_t 419 cache_get_hash(char *name, u_char len, struct vnode *dvp) 420 { 421 uint32_t hash; 422 423 hash = fnv_32_buf(name, len, FNV1_32_INIT); 424 hash = fnv_32_buf(&dvp, sizeof(dvp), hash); 425 return (hash); 426 } 427 428 static inline struct rwlock * 429 NCP2BUCKETLOCK(struct namecache *ncp) 430 { 431 uint32_t hash; 432 433 hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, ncp->nc_dvp); 434 return (HASH2BUCKETLOCK(hash)); 435 } 436 437 #ifdef INVARIANTS 438 static void 439 cache_assert_bucket_locked(struct namecache *ncp, int mode) 440 { 441 struct rwlock *blp; 442 443 blp = NCP2BUCKETLOCK(ncp); 444 rw_assert(blp, mode); 445 } 446 #else 447 #define cache_assert_bucket_locked(x, y) do { } while (0) 448 #endif 449 450 #define cache_sort(x, y) _cache_sort((void **)(x), (void **)(y)) 451 static void 452 _cache_sort(void **p1, void **p2) 453 { 454 void *tmp; 455 456 if (*p1 > *p2) { 457 tmp = *p2; 458 *p2 = *p1; 459 *p1 = tmp; 460 } 461 } 462 463 static void 464 cache_lock_all_buckets(void) 465 { 466 u_int i; 467 468 for (i = 0; i < numbucketlocks; i++) 469 rw_wlock(&bucketlocks[i]); 470 } 471 472 static void 473 cache_unlock_all_buckets(void) 474 { 475 u_int i; 476 477 for (i = 0; i < numbucketlocks; i++) 478 rw_wunlock(&bucketlocks[i]); 479 } 480 481 static void 482 cache_lock_all_vnodes(void) 483 { 484 u_int i; 485 486 for (i = 0; i < numvnodelocks; i++) 487 mtx_lock(&vnodelocks[i]); 488 } 489 490 static void 491 cache_unlock_all_vnodes(void) 492 { 493 u_int i; 494 495 for (i = 0; i < numvnodelocks; i++) 496 mtx_unlock(&vnodelocks[i]); 497 } 498 499 static int 500 cache_trylock_vnodes(struct mtx *vlp1, struct mtx *vlp2) 501 { 502 503 cache_sort(&vlp1, &vlp2); 504 MPASS(vlp2 != NULL); 505 506 if (vlp1 != NULL) { 507 if (!mtx_trylock(vlp1)) 508 return (EAGAIN); 509 } 510 if (!mtx_trylock(vlp2)) { 511 if (vlp1 != NULL) 512 mtx_unlock(vlp1); 513 return (EAGAIN); 514 } 515 516 return (0); 517 } 
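/*
 * Typical use of the trylock protocol above (an illustrative sketch; the
 * real callers are the cache_zap_*_bucket() helpers further down):
 *
 *	rw_wlock(blp);
 *	...
 *	if (cache_trylock_vnodes(dvlp, vlp) != 0) {
 *		rw_wunlock(blp);
 *		cache_maybe_yield();
 *		goto retry;
 *	}
 *	cache_zap_locked(ncp, false);
 *	rw_wunlock(blp);
 *	cache_unlock_vnodes(dvlp, vlp);
 *
 * The vnode locks cannot be taken blocking here because the bucket lock,
 * which comes later in the lock order, is already held.
 */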
518 519 static void 520 cache_unlock_vnodes(struct mtx *vlp1, struct mtx *vlp2) 521 { 522 523 MPASS(vlp1 != NULL || vlp2 != NULL); 524 525 if (vlp1 != NULL) 526 mtx_unlock(vlp1); 527 if (vlp2 != NULL) 528 mtx_unlock(vlp2); 529 } 530 531 static int 532 sysctl_nchstats(SYSCTL_HANDLER_ARGS) 533 { 534 struct nchstats snap; 535 536 if (req->oldptr == NULL) 537 return (SYSCTL_OUT(req, 0, sizeof(snap))); 538 539 snap = nchstats; 540 snap.ncs_goodhits = counter_u64_fetch(numposhits); 541 snap.ncs_neghits = counter_u64_fetch(numneghits); 542 snap.ncs_badhits = counter_u64_fetch(numposzaps) + 543 counter_u64_fetch(numnegzaps); 544 snap.ncs_miss = counter_u64_fetch(nummisszap) + 545 counter_u64_fetch(nummiss); 546 547 return (SYSCTL_OUT(req, &snap, sizeof(snap))); 548 } 549 SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE | CTLFLAG_RD | 550 CTLFLAG_MPSAFE, 0, 0, sysctl_nchstats, "LU", 551 "VFS cache effectiveness statistics"); 552 553 #ifdef DIAGNOSTIC 554 /* 555 * Grab an atomic snapshot of the name cache hash chain lengths 556 */ 557 static SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, 558 "hash table stats"); 559 560 static int 561 sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS) 562 { 563 struct nchashhead *ncpp; 564 struct namecache *ncp; 565 int i, error, n_nchash, *cntbuf; 566 567 retry: 568 n_nchash = nchash + 1; /* nchash is max index, not count */ 569 if (req->oldptr == NULL) 570 return SYSCTL_OUT(req, 0, n_nchash * sizeof(int)); 571 cntbuf = malloc(n_nchash * sizeof(int), M_TEMP, M_ZERO | M_WAITOK); 572 cache_lock_all_buckets(); 573 if (n_nchash != nchash + 1) { 574 cache_unlock_all_buckets(); 575 free(cntbuf, M_TEMP); 576 goto retry; 577 } 578 /* Scan hash tables counting entries */ 579 for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++) 580 LIST_FOREACH(ncp, ncpp, nc_hash) 581 cntbuf[i]++; 582 cache_unlock_all_buckets(); 583 for (error = 0, i = 0; i < n_nchash; i++) 584 if ((error = SYSCTL_OUT(req, &cntbuf[i], sizeof(int))) != 0) 585 break; 586 free(cntbuf, M_TEMP); 587 return (error); 588 } 589 SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD| 590 CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int", 591 "nchash chain lengths"); 592 593 static int 594 sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS) 595 { 596 int error; 597 struct nchashhead *ncpp; 598 struct namecache *ncp; 599 int n_nchash; 600 int count, maxlength, used, pct; 601 602 if (!req->oldptr) 603 return SYSCTL_OUT(req, 0, 4 * sizeof(int)); 604 605 cache_lock_all_buckets(); 606 n_nchash = nchash + 1; /* nchash is max index, not count */ 607 used = 0; 608 maxlength = 0; 609 610 /* Scan hash tables for applicable entries */ 611 for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) { 612 count = 0; 613 LIST_FOREACH(ncp, ncpp, nc_hash) { 614 count++; 615 } 616 if (count) 617 used++; 618 if (maxlength < count) 619 maxlength = count; 620 } 621 n_nchash = nchash + 1; 622 cache_unlock_all_buckets(); 623 pct = (used * 100) / (n_nchash / 100); 624 error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash)); 625 if (error) 626 return (error); 627 error = SYSCTL_OUT(req, &used, sizeof(used)); 628 if (error) 629 return (error); 630 error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength)); 631 if (error) 632 return (error); 633 error = SYSCTL_OUT(req, &pct, sizeof(pct)); 634 if (error) 635 return (error); 636 return (0); 637 } 638 SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD| 639 CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I", 640 "nchash 
statistics (number of total/used buckets, maximum chain length, usage percentage)"); 641 #endif 642 643 /* 644 * Negative entries management 645 * 646 * A variation of LRU scheme is used. New entries are hashed into one of 647 * numneglists cold lists. Entries get promoted to the hot list on first hit. 648 * Partial LRU for the hot list is maintained by requeueing them every 649 * ncneghitsrequeue hits. 650 * 651 * The shrinker will demote hot list head and evict from the cold list in a 652 * round-robin manner. 653 */ 654 static void 655 cache_negative_hit(struct namecache *ncp) 656 { 657 struct neglist *neglist; 658 u_int hits; 659 660 MPASS(ncp->nc_flag & NCF_NEGATIVE); 661 hits = atomic_fetchadd_int(&ncp->nc_neghits, 1); 662 if (ncp->nc_flag & NCF_HOTNEGATIVE) { 663 if ((hits % ncneghitsrequeue) != 0) 664 return; 665 mtx_lock(&ncneg_hot.nl_lock); 666 if (ncp->nc_flag & NCF_HOTNEGATIVE) { 667 TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst); 668 TAILQ_INSERT_TAIL(&ncneg_hot.nl_list, ncp, nc_dst); 669 mtx_unlock(&ncneg_hot.nl_lock); 670 return; 671 } 672 /* 673 * The shrinker cleared the flag and removed the entry from 674 * the hot list. Put it back. 675 */ 676 } else { 677 mtx_lock(&ncneg_hot.nl_lock); 678 } 679 neglist = NCP2NEGLIST(ncp); 680 mtx_lock(&neglist->nl_lock); 681 if (!(ncp->nc_flag & NCF_HOTNEGATIVE)) { 682 TAILQ_REMOVE(&neglist->nl_list, ncp, nc_dst); 683 TAILQ_INSERT_TAIL(&ncneg_hot.nl_list, ncp, nc_dst); 684 ncp->nc_flag |= NCF_HOTNEGATIVE; 685 } 686 mtx_unlock(&neglist->nl_lock); 687 mtx_unlock(&ncneg_hot.nl_lock); 688 } 689 690 static void 691 cache_negative_insert(struct namecache *ncp, bool neg_locked) 692 { 693 struct neglist *neglist; 694 695 MPASS(ncp->nc_flag & NCF_NEGATIVE); 696 cache_assert_bucket_locked(ncp, RA_WLOCKED); 697 neglist = NCP2NEGLIST(ncp); 698 if (!neg_locked) { 699 mtx_lock(&neglist->nl_lock); 700 } else { 701 mtx_assert(&neglist->nl_lock, MA_OWNED); 702 } 703 TAILQ_INSERT_TAIL(&neglist->nl_list, ncp, nc_dst); 704 if (!neg_locked) 705 mtx_unlock(&neglist->nl_lock); 706 atomic_add_rel_long(&numneg, 1); 707 } 708 709 static void 710 cache_negative_remove(struct namecache *ncp, bool neg_locked) 711 { 712 struct neglist *neglist; 713 bool hot_locked = false; 714 bool list_locked = false; 715 716 MPASS(ncp->nc_flag & NCF_NEGATIVE); 717 cache_assert_bucket_locked(ncp, RA_WLOCKED); 718 neglist = NCP2NEGLIST(ncp); 719 if (!neg_locked) { 720 if (ncp->nc_flag & NCF_HOTNEGATIVE) { 721 hot_locked = true; 722 mtx_lock(&ncneg_hot.nl_lock); 723 if (!(ncp->nc_flag & NCF_HOTNEGATIVE)) { 724 list_locked = true; 725 mtx_lock(&neglist->nl_lock); 726 } 727 } else { 728 list_locked = true; 729 mtx_lock(&neglist->nl_lock); 730 } 731 } 732 if (ncp->nc_flag & NCF_HOTNEGATIVE) { 733 mtx_assert(&ncneg_hot.nl_lock, MA_OWNED); 734 TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst); 735 } else { 736 mtx_assert(&neglist->nl_lock, MA_OWNED); 737 TAILQ_REMOVE(&neglist->nl_list, ncp, nc_dst); 738 } 739 if (list_locked) 740 mtx_unlock(&neglist->nl_lock); 741 if (hot_locked) 742 mtx_unlock(&ncneg_hot.nl_lock); 743 atomic_subtract_rel_long(&numneg, 1); 744 } 745 746 static void 747 cache_negative_shrink_select(int start, struct namecache **ncpp, 748 struct neglist **neglistpp) 749 { 750 struct neglist *neglist; 751 struct namecache *ncp; 752 int i; 753 754 *ncpp = ncp = NULL; 755 756 for (i = start; i < numneglists; i++) { 757 neglist = &neglists[i]; 758 if (TAILQ_FIRST(&neglist->nl_list) == NULL) 759 continue; 760 mtx_lock(&neglist->nl_lock); 761 ncp = TAILQ_FIRST(&neglist->nl_list); 762 
if (ncp != NULL) 763 break; 764 mtx_unlock(&neglist->nl_lock); 765 } 766 767 *neglistpp = neglist; 768 *ncpp = ncp; 769 } 770 771 static void 772 cache_negative_zap_one(void) 773 { 774 struct namecache *ncp, *ncp2; 775 struct neglist *neglist; 776 struct mtx *dvlp; 777 struct rwlock *blp; 778 779 if (!mtx_trylock(&ncneg_shrink_lock)) 780 return; 781 782 mtx_lock(&ncneg_hot.nl_lock); 783 ncp = TAILQ_FIRST(&ncneg_hot.nl_list); 784 if (ncp != NULL) { 785 neglist = NCP2NEGLIST(ncp); 786 mtx_lock(&neglist->nl_lock); 787 TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst); 788 TAILQ_INSERT_TAIL(&neglist->nl_list, ncp, nc_dst); 789 ncp->nc_flag &= ~NCF_HOTNEGATIVE; 790 mtx_unlock(&neglist->nl_lock); 791 } 792 793 cache_negative_shrink_select(shrink_list_turn, &ncp, &neglist); 794 shrink_list_turn++; 795 if (shrink_list_turn == numneglists) 796 shrink_list_turn = 0; 797 if (ncp == NULL && shrink_list_turn == 0) 798 cache_negative_shrink_select(shrink_list_turn, &ncp, &neglist); 799 if (ncp == NULL) { 800 mtx_unlock(&ncneg_hot.nl_lock); 801 goto out; 802 } 803 804 MPASS(ncp->nc_flag & NCF_NEGATIVE); 805 dvlp = VP2VNODELOCK(ncp->nc_dvp); 806 blp = NCP2BUCKETLOCK(ncp); 807 mtx_unlock(&neglist->nl_lock); 808 mtx_unlock(&ncneg_hot.nl_lock); 809 mtx_lock(dvlp); 810 rw_wlock(blp); 811 mtx_lock(&neglist->nl_lock); 812 ncp2 = TAILQ_FIRST(&neglist->nl_list); 813 if (ncp != ncp2 || dvlp != VP2VNODELOCK(ncp2->nc_dvp) || 814 blp != NCP2BUCKETLOCK(ncp2) || !(ncp2->nc_flag & NCF_NEGATIVE)) { 815 ncp = NULL; 816 goto out_unlock_all; 817 } 818 SDT_PROBE3(vfs, namecache, shrink_negative, done, ncp->nc_dvp, 819 ncp->nc_name, ncp->nc_neghits); 820 821 cache_zap_locked(ncp, true); 822 out_unlock_all: 823 mtx_unlock(&neglist->nl_lock); 824 rw_wunlock(blp); 825 mtx_unlock(dvlp); 826 out: 827 mtx_unlock(&ncneg_shrink_lock); 828 cache_free(ncp); 829 } 830 831 /* 832 * cache_zap_locked(): 833 * 834 * Removes a namecache entry from cache, whether it contains an actual 835 * pointer to a vnode or if it is just a negative cache entry. 836 */ 837 static void 838 cache_zap_locked(struct namecache *ncp, bool neg_locked) 839 { 840 841 if (!(ncp->nc_flag & NCF_NEGATIVE)) 842 cache_assert_vnode_locked(ncp->nc_vp); 843 cache_assert_vnode_locked(ncp->nc_dvp); 844 cache_assert_bucket_locked(ncp, RA_WLOCKED); 845 846 CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, 847 (ncp->nc_flag & NCF_NEGATIVE) ? 
NULL : ncp->nc_vp); 848 if (!(ncp->nc_flag & NCF_NEGATIVE)) { 849 SDT_PROBE3(vfs, namecache, zap, done, ncp->nc_dvp, 850 ncp->nc_name, ncp->nc_vp); 851 } else { 852 SDT_PROBE3(vfs, namecache, zap_negative, done, ncp->nc_dvp, 853 ncp->nc_name, ncp->nc_neghits); 854 } 855 LIST_REMOVE(ncp, nc_hash); 856 if (!(ncp->nc_flag & NCF_NEGATIVE)) { 857 TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst); 858 if (ncp == ncp->nc_vp->v_cache_dd) 859 ncp->nc_vp->v_cache_dd = NULL; 860 } else { 861 cache_negative_remove(ncp, neg_locked); 862 } 863 if (ncp->nc_flag & NCF_ISDOTDOT) { 864 if (ncp == ncp->nc_dvp->v_cache_dd) 865 ncp->nc_dvp->v_cache_dd = NULL; 866 } else { 867 LIST_REMOVE(ncp, nc_src); 868 if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) { 869 ncp->nc_flag |= NCF_DVDROP; 870 atomic_subtract_rel_long(&numcachehv, 1); 871 } 872 } 873 atomic_subtract_rel_long(&numcache, 1); 874 } 875 876 static void 877 cache_zap_negative_locked_vnode_kl(struct namecache *ncp, struct vnode *vp) 878 { 879 struct rwlock *blp; 880 881 MPASS(ncp->nc_dvp == vp); 882 MPASS(ncp->nc_flag & NCF_NEGATIVE); 883 cache_assert_vnode_locked(vp); 884 885 blp = NCP2BUCKETLOCK(ncp); 886 rw_wlock(blp); 887 cache_zap_locked(ncp, false); 888 rw_wunlock(blp); 889 } 890 891 static bool 892 cache_zap_locked_vnode_kl2(struct namecache *ncp, struct vnode *vp, 893 struct mtx **vlpp) 894 { 895 struct mtx *pvlp, *vlp1, *vlp2, *to_unlock; 896 struct rwlock *blp; 897 898 MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp); 899 cache_assert_vnode_locked(vp); 900 901 if (ncp->nc_flag & NCF_NEGATIVE) { 902 if (*vlpp != NULL) { 903 mtx_unlock(*vlpp); 904 *vlpp = NULL; 905 } 906 cache_zap_negative_locked_vnode_kl(ncp, vp); 907 return (true); 908 } 909 910 pvlp = VP2VNODELOCK(vp); 911 blp = NCP2BUCKETLOCK(ncp); 912 vlp1 = VP2VNODELOCK(ncp->nc_dvp); 913 vlp2 = VP2VNODELOCK(ncp->nc_vp); 914 915 if (*vlpp == vlp1 || *vlpp == vlp2) { 916 to_unlock = *vlpp; 917 *vlpp = NULL; 918 } else { 919 if (*vlpp != NULL) { 920 mtx_unlock(*vlpp); 921 *vlpp = NULL; 922 } 923 cache_sort(&vlp1, &vlp2); 924 if (vlp1 == pvlp) { 925 mtx_lock(vlp2); 926 to_unlock = vlp2; 927 } else { 928 if (!mtx_trylock(vlp1)) 929 goto out_relock; 930 to_unlock = vlp1; 931 } 932 } 933 rw_wlock(blp); 934 cache_zap_locked(ncp, false); 935 rw_wunlock(blp); 936 if (to_unlock != NULL) 937 mtx_unlock(to_unlock); 938 return (true); 939 940 out_relock: 941 mtx_unlock(vlp2); 942 mtx_lock(vlp1); 943 mtx_lock(vlp2); 944 MPASS(*vlpp == NULL); 945 *vlpp = vlp1; 946 return (false); 947 } 948 949 static int 950 cache_zap_locked_vnode(struct namecache *ncp, struct vnode *vp) 951 { 952 struct mtx *pvlp, *vlp1, *vlp2, *to_unlock; 953 struct rwlock *blp; 954 int error = 0; 955 956 MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp); 957 cache_assert_vnode_locked(vp); 958 959 pvlp = VP2VNODELOCK(vp); 960 if (ncp->nc_flag & NCF_NEGATIVE) { 961 cache_zap_negative_locked_vnode_kl(ncp, vp); 962 goto out; 963 } 964 965 blp = NCP2BUCKETLOCK(ncp); 966 vlp1 = VP2VNODELOCK(ncp->nc_dvp); 967 vlp2 = VP2VNODELOCK(ncp->nc_vp); 968 cache_sort(&vlp1, &vlp2); 969 if (vlp1 == pvlp) { 970 mtx_lock(vlp2); 971 to_unlock = vlp2; 972 } else { 973 if (!mtx_trylock(vlp1)) { 974 error = EAGAIN; 975 goto out; 976 } 977 to_unlock = vlp1; 978 } 979 rw_wlock(blp); 980 cache_zap_locked(ncp, false); 981 rw_wunlock(blp); 982 mtx_unlock(to_unlock); 983 out: 984 mtx_unlock(pvlp); 985 return (error); 986 } 987 988 static int 989 cache_zap_wlocked_bucket(struct namecache *ncp, struct rwlock *blp) 990 { 991 struct mtx *dvlp, *vlp; 992 993 
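	/*
	 * The bucket is already write-locked here.  Vnode locks precede
	 * bucket locks in the lock order, so they may only be trylocked;
	 * on failure the bucket lock is dropped and EAGAIN tells the
	 * caller to retry from scratch.
	 */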
cache_assert_bucket_locked(ncp, RA_WLOCKED); 994 995 dvlp = VP2VNODELOCK(ncp->nc_dvp); 996 vlp = NULL; 997 if (!(ncp->nc_flag & NCF_NEGATIVE)) 998 vlp = VP2VNODELOCK(ncp->nc_vp); 999 if (cache_trylock_vnodes(dvlp, vlp) == 0) { 1000 cache_zap_locked(ncp, false); 1001 rw_wunlock(blp); 1002 cache_unlock_vnodes(dvlp, vlp); 1003 return (0); 1004 } 1005 1006 rw_wunlock(blp); 1007 return (EAGAIN); 1008 } 1009 1010 static int 1011 cache_zap_rlocked_bucket(struct namecache *ncp, struct rwlock *blp) 1012 { 1013 struct mtx *dvlp, *vlp; 1014 1015 cache_assert_bucket_locked(ncp, RA_RLOCKED); 1016 1017 dvlp = VP2VNODELOCK(ncp->nc_dvp); 1018 vlp = NULL; 1019 if (!(ncp->nc_flag & NCF_NEGATIVE)) 1020 vlp = VP2VNODELOCK(ncp->nc_vp); 1021 if (cache_trylock_vnodes(dvlp, vlp) == 0) { 1022 rw_runlock(blp); 1023 rw_wlock(blp); 1024 cache_zap_locked(ncp, false); 1025 rw_wunlock(blp); 1026 cache_unlock_vnodes(dvlp, vlp); 1027 return (0); 1028 } 1029 1030 rw_runlock(blp); 1031 return (EAGAIN); 1032 } 1033 1034 static int 1035 cache_zap_wlocked_bucket_kl(struct namecache *ncp, struct rwlock *blp, 1036 struct mtx **vlpp1, struct mtx **vlpp2) 1037 { 1038 struct mtx *dvlp, *vlp; 1039 1040 cache_assert_bucket_locked(ncp, RA_WLOCKED); 1041 1042 dvlp = VP2VNODELOCK(ncp->nc_dvp); 1043 vlp = NULL; 1044 if (!(ncp->nc_flag & NCF_NEGATIVE)) 1045 vlp = VP2VNODELOCK(ncp->nc_vp); 1046 cache_sort(&dvlp, &vlp); 1047 1048 if (*vlpp1 == dvlp && *vlpp2 == vlp) { 1049 cache_zap_locked(ncp, false); 1050 cache_unlock_vnodes(dvlp, vlp); 1051 *vlpp1 = NULL; 1052 *vlpp2 = NULL; 1053 return (0); 1054 } 1055 1056 if (*vlpp1 != NULL) 1057 mtx_unlock(*vlpp1); 1058 if (*vlpp2 != NULL) 1059 mtx_unlock(*vlpp2); 1060 *vlpp1 = NULL; 1061 *vlpp2 = NULL; 1062 1063 if (cache_trylock_vnodes(dvlp, vlp) == 0) { 1064 cache_zap_locked(ncp, false); 1065 cache_unlock_vnodes(dvlp, vlp); 1066 return (0); 1067 } 1068 1069 rw_wunlock(blp); 1070 *vlpp1 = dvlp; 1071 *vlpp2 = vlp; 1072 if (*vlpp1 != NULL) 1073 mtx_lock(*vlpp1); 1074 mtx_lock(*vlpp2); 1075 rw_wlock(blp); 1076 return (EAGAIN); 1077 } 1078 1079 static void 1080 cache_lookup_unlock(struct rwlock *blp, struct mtx *vlp) 1081 { 1082 1083 if (blp != NULL) { 1084 rw_runlock(blp); 1085 } else { 1086 mtx_unlock(vlp); 1087 } 1088 } 1089 1090 static int __noinline 1091 cache_lookup_dot(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, 1092 struct timespec *tsp, int *ticksp) 1093 { 1094 int ltype; 1095 1096 *vpp = dvp; 1097 CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .", 1098 dvp, cnp->cn_nameptr); 1099 counter_u64_add(dothits, 1); 1100 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", *vpp); 1101 if (tsp != NULL) 1102 timespecclear(tsp); 1103 if (ticksp != NULL) 1104 *ticksp = ticks; 1105 vrefact(*vpp); 1106 /* 1107 * When we lookup "." we still can be asked to lock it 1108 * differently... 1109 */ 1110 ltype = cnp->cn_lkflags & LK_TYPE_MASK; 1111 if (ltype != VOP_ISLOCKED(*vpp)) { 1112 if (ltype == LK_EXCLUSIVE) { 1113 vn_lock(*vpp, LK_UPGRADE | LK_RETRY); 1114 if ((*vpp)->v_iflag & VI_DOOMED) { 1115 /* forced unmount */ 1116 vrele(*vpp); 1117 *vpp = NULL; 1118 return (ENOENT); 1119 } 1120 } else 1121 vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY); 1122 } 1123 return (-1); 1124 } 1125 1126 /* 1127 * Lookup an entry in the cache 1128 * 1129 * Lookup is called with dvp pointing to the directory to search, 1130 * cnp pointing to the name of the entry being sought. If the lookup 1131 * succeeds, the vnode is returned in *vpp, and a status of -1 is 1132 * returned. 
If the lookup determines that the name does not exist 1133 * (negative caching), a status of ENOENT is returned. If the lookup 1134 * fails, a status of zero is returned. If the directory vnode is 1135 * recycled out from under us due to a forced unmount, a status of 1136 * ENOENT is returned. 1137 * 1138 * vpp is locked and ref'd on return. If we're looking up DOTDOT, dvp is 1139 * unlocked. If we're looking up . an extra ref is taken, but the lock is 1140 * not recursively acquired. 1141 */ 1142 1143 static __noinline int 1144 cache_lookup_nomakeentry(struct vnode *dvp, struct vnode **vpp, 1145 struct componentname *cnp, struct timespec *tsp, int *ticksp) 1146 { 1147 struct namecache *ncp; 1148 struct rwlock *blp; 1149 struct mtx *dvlp, *dvlp2; 1150 uint32_t hash; 1151 int error; 1152 1153 if (cnp->cn_namelen == 2 && 1154 cnp->cn_nameptr[0] == '.' && cnp->cn_nameptr[1] == '.') { 1155 counter_u64_add(dotdothits, 1); 1156 dvlp = VP2VNODELOCK(dvp); 1157 dvlp2 = NULL; 1158 mtx_lock(dvlp); 1159 retry_dotdot: 1160 ncp = dvp->v_cache_dd; 1161 if (ncp == NULL) { 1162 SDT_PROBE3(vfs, namecache, lookup, miss, dvp, 1163 "..", NULL); 1164 mtx_unlock(dvlp); 1165 return (0); 1166 } 1167 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) { 1168 if (ncp->nc_dvp != dvp) 1169 panic("dvp %p v_cache_dd %p\n", dvp, ncp); 1170 if (!cache_zap_locked_vnode_kl2(ncp, 1171 dvp, &dvlp2)) 1172 goto retry_dotdot; 1173 MPASS(dvp->v_cache_dd == NULL); 1174 mtx_unlock(dvlp); 1175 if (dvlp2 != NULL) 1176 mtx_unlock(dvlp2); 1177 cache_free(ncp); 1178 } else { 1179 dvp->v_cache_dd = NULL; 1180 mtx_unlock(dvlp); 1181 if (dvlp2 != NULL) 1182 mtx_unlock(dvlp2); 1183 } 1184 return (0); 1185 } 1186 1187 hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp); 1188 blp = HASH2BUCKETLOCK(hash); 1189 retry: 1190 if (LIST_EMPTY(NCHHASH(hash))) 1191 goto out_no_entry; 1192 1193 rw_wlock(blp); 1194 1195 LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { 1196 counter_u64_add(numchecks, 1); 1197 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen && 1198 !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen)) 1199 break; 1200 } 1201 1202 /* We failed to find an entry */ 1203 if (ncp == NULL) { 1204 rw_wunlock(blp); 1205 goto out_no_entry; 1206 } 1207 1208 counter_u64_add(numposzaps, 1); 1209 1210 error = cache_zap_wlocked_bucket(ncp, blp); 1211 if (error != 0) { 1212 zap_and_exit_bucket_fail++; 1213 cache_maybe_yield(); 1214 goto retry; 1215 } 1216 cache_free(ncp); 1217 return (0); 1218 out_no_entry: 1219 SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr, NULL); 1220 counter_u64_add(nummisszap, 1); 1221 return (0); 1222 } 1223 1224 int 1225 cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, 1226 struct timespec *tsp, int *ticksp) 1227 { 1228 struct namecache_ts *ncp_ts; 1229 struct namecache *ncp; 1230 struct rwlock *blp; 1231 struct mtx *dvlp, *dvlp2; 1232 uint32_t hash; 1233 int error, ltype; 1234 1235 if (__predict_false(!doingcache)) { 1236 cnp->cn_flags &= ~MAKEENTRY; 1237 return (0); 1238 } 1239 1240 counter_u64_add(numcalls, 1); 1241 1242 if (__predict_false(cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.')) 1243 return (cache_lookup_dot(dvp, vpp, cnp, tsp, ticksp)); 1244 1245 if ((cnp->cn_flags & MAKEENTRY) == 0) 1246 return (cache_lookup_nomakeentry(dvp, vpp, cnp, tsp, ticksp)); 1247 1248 retry: 1249 blp = NULL; 1250 error = 0; 1251 if (cnp->cn_namelen == 2 && 1252 cnp->cn_nameptr[0] == '.' 
&& cnp->cn_nameptr[1] == '.') { 1253 counter_u64_add(dotdothits, 1); 1254 dvlp = VP2VNODELOCK(dvp); 1255 dvlp2 = NULL; 1256 mtx_lock(dvlp); 1257 ncp = dvp->v_cache_dd; 1258 if (ncp == NULL) { 1259 SDT_PROBE3(vfs, namecache, lookup, miss, dvp, 1260 "..", NULL); 1261 mtx_unlock(dvlp); 1262 return (0); 1263 } 1264 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) { 1265 if (ncp->nc_flag & NCF_NEGATIVE) 1266 *vpp = NULL; 1267 else 1268 *vpp = ncp->nc_vp; 1269 } else 1270 *vpp = ncp->nc_dvp; 1271 /* Return failure if negative entry was found. */ 1272 if (*vpp == NULL) 1273 goto negative_success; 1274 CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..", 1275 dvp, cnp->cn_nameptr, *vpp); 1276 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, "..", 1277 *vpp); 1278 cache_out_ts(ncp, tsp, ticksp); 1279 if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) == 1280 NCF_DTS && tsp != NULL) { 1281 ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc); 1282 *tsp = ncp_ts->nc_dotdottime; 1283 } 1284 goto success; 1285 } 1286 1287 hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp); 1288 blp = HASH2BUCKETLOCK(hash); 1289 rw_rlock(blp); 1290 1291 LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { 1292 counter_u64_add(numchecks, 1); 1293 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen && 1294 !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen)) 1295 break; 1296 } 1297 1298 /* We failed to find an entry */ 1299 if (ncp == NULL) { 1300 rw_runlock(blp); 1301 SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr, 1302 NULL); 1303 counter_u64_add(nummiss, 1); 1304 return (0); 1305 } 1306 1307 /* We found a "positive" match, return the vnode */ 1308 if (!(ncp->nc_flag & NCF_NEGATIVE)) { 1309 counter_u64_add(numposhits, 1); 1310 *vpp = ncp->nc_vp; 1311 CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p", 1312 dvp, cnp->cn_nameptr, *vpp, ncp); 1313 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name, 1314 *vpp); 1315 cache_out_ts(ncp, tsp, ticksp); 1316 goto success; 1317 } 1318 1319 negative_success: 1320 /* We found a negative match, and want to create it, so purge */ 1321 if (cnp->cn_nameiop == CREATE) { 1322 counter_u64_add(numnegzaps, 1); 1323 goto zap_and_exit; 1324 } 1325 1326 counter_u64_add(numneghits, 1); 1327 cache_negative_hit(ncp); 1328 if (ncp->nc_flag & NCF_WHITE) 1329 cnp->cn_flags |= ISWHITEOUT; 1330 SDT_PROBE2(vfs, namecache, lookup, hit__negative, dvp, 1331 ncp->nc_name); 1332 cache_out_ts(ncp, tsp, ticksp); 1333 cache_lookup_unlock(blp, dvlp); 1334 return (ENOENT); 1335 1336 success: 1337 /* 1338 * On success we return a locked and ref'd vnode as per the lookup 1339 * protocol. 
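	 *
	 * For a ".." lookup, dvp is temporarily unlocked around the vget()
	 * of the parent and then relocked; if dvp was doomed in the meantime
	 * the lookup fails with ENOENT.  A failed vget() restarts the whole
	 * lookup instead.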
1340 */ 1341 MPASS(dvp != *vpp); 1342 ltype = 0; /* silence gcc warning */ 1343 if (cnp->cn_flags & ISDOTDOT) { 1344 ltype = VOP_ISLOCKED(dvp); 1345 VOP_UNLOCK(dvp, 0); 1346 } 1347 vhold(*vpp); 1348 cache_lookup_unlock(blp, dvlp); 1349 error = vget(*vpp, cnp->cn_lkflags | LK_VNHELD, cnp->cn_thread); 1350 if (cnp->cn_flags & ISDOTDOT) { 1351 vn_lock(dvp, ltype | LK_RETRY); 1352 if (dvp->v_iflag & VI_DOOMED) { 1353 if (error == 0) 1354 vput(*vpp); 1355 *vpp = NULL; 1356 return (ENOENT); 1357 } 1358 } 1359 if (error) { 1360 *vpp = NULL; 1361 goto retry; 1362 } 1363 if ((cnp->cn_flags & ISLASTCN) && 1364 (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) { 1365 ASSERT_VOP_ELOCKED(*vpp, "cache_lookup"); 1366 } 1367 return (-1); 1368 1369 zap_and_exit: 1370 if (blp != NULL) 1371 error = cache_zap_rlocked_bucket(ncp, blp); 1372 else 1373 error = cache_zap_locked_vnode(ncp, dvp); 1374 if (error != 0) { 1375 zap_and_exit_bucket_fail++; 1376 cache_maybe_yield(); 1377 goto retry; 1378 } 1379 cache_free(ncp); 1380 return (0); 1381 } 1382 1383 struct celockstate { 1384 struct mtx *vlp[3]; 1385 struct rwlock *blp[2]; 1386 }; 1387 CTASSERT((nitems(((struct celockstate *)0)->vlp) == 3)); 1388 CTASSERT((nitems(((struct celockstate *)0)->blp) == 2)); 1389 1390 static inline void 1391 cache_celockstate_init(struct celockstate *cel) 1392 { 1393 1394 bzero(cel, sizeof(*cel)); 1395 } 1396 1397 static void 1398 cache_lock_vnodes_cel(struct celockstate *cel, struct vnode *vp, 1399 struct vnode *dvp) 1400 { 1401 struct mtx *vlp1, *vlp2; 1402 1403 MPASS(cel->vlp[0] == NULL); 1404 MPASS(cel->vlp[1] == NULL); 1405 MPASS(cel->vlp[2] == NULL); 1406 1407 MPASS(vp != NULL || dvp != NULL); 1408 1409 vlp1 = VP2VNODELOCK(vp); 1410 vlp2 = VP2VNODELOCK(dvp); 1411 cache_sort(&vlp1, &vlp2); 1412 1413 if (vlp1 != NULL) { 1414 mtx_lock(vlp1); 1415 cel->vlp[0] = vlp1; 1416 } 1417 mtx_lock(vlp2); 1418 cel->vlp[1] = vlp2; 1419 } 1420 1421 static void 1422 cache_unlock_vnodes_cel(struct celockstate *cel) 1423 { 1424 1425 MPASS(cel->vlp[0] != NULL || cel->vlp[1] != NULL); 1426 1427 if (cel->vlp[0] != NULL) 1428 mtx_unlock(cel->vlp[0]); 1429 if (cel->vlp[1] != NULL) 1430 mtx_unlock(cel->vlp[1]); 1431 if (cel->vlp[2] != NULL) 1432 mtx_unlock(cel->vlp[2]); 1433 } 1434 1435 static bool 1436 cache_lock_vnodes_cel_3(struct celockstate *cel, struct vnode *vp) 1437 { 1438 struct mtx *vlp; 1439 bool ret; 1440 1441 cache_assert_vlp_locked(cel->vlp[0]); 1442 cache_assert_vlp_locked(cel->vlp[1]); 1443 MPASS(cel->vlp[2] == NULL); 1444 1445 MPASS(vp != NULL); 1446 vlp = VP2VNODELOCK(vp); 1447 1448 ret = true; 1449 if (vlp >= cel->vlp[1]) { 1450 mtx_lock(vlp); 1451 } else { 1452 if (mtx_trylock(vlp)) 1453 goto out; 1454 cache_lock_vnodes_cel_3_failures++; 1455 cache_unlock_vnodes_cel(cel); 1456 if (vlp < cel->vlp[0]) { 1457 mtx_lock(vlp); 1458 mtx_lock(cel->vlp[0]); 1459 mtx_lock(cel->vlp[1]); 1460 } else { 1461 if (cel->vlp[0] != NULL) 1462 mtx_lock(cel->vlp[0]); 1463 mtx_lock(vlp); 1464 mtx_lock(cel->vlp[1]); 1465 } 1466 ret = false; 1467 } 1468 out: 1469 cel->vlp[2] = vlp; 1470 return (ret); 1471 } 1472 1473 static void 1474 cache_lock_buckets_cel(struct celockstate *cel, struct rwlock *blp1, 1475 struct rwlock *blp2) 1476 { 1477 1478 MPASS(cel->blp[0] == NULL); 1479 MPASS(cel->blp[1] == NULL); 1480 1481 cache_sort(&blp1, &blp2); 1482 1483 if (blp1 != NULL) { 1484 rw_wlock(blp1); 1485 cel->blp[0] = blp1; 1486 } 1487 rw_wlock(blp2); 1488 cel->blp[1] = blp2; 1489 } 1490 1491 static void 1492 cache_unlock_buckets_cel(struct celockstate *cel) 1493 { 
1494 1495 if (cel->blp[0] != NULL) 1496 rw_wunlock(cel->blp[0]); 1497 rw_wunlock(cel->blp[1]); 1498 } 1499 1500 /* 1501 * Lock part of the cache affected by the insertion. 1502 * 1503 * This means vnodelocks for dvp, vp and the relevant bucketlock. 1504 * However, insertion can result in removal of an old entry. In this 1505 * case we have an additional vnode and bucketlock pair to lock. If the 1506 * entry is negative, ncelock is locked instead of the vnode. 1507 * 1508 * That is, in the worst case we have to lock 3 vnodes and 2 bucketlocks, while 1509 * preserving the locking order (smaller address first). 1510 */ 1511 static void 1512 cache_enter_lock(struct celockstate *cel, struct vnode *dvp, struct vnode *vp, 1513 uint32_t hash) 1514 { 1515 struct namecache *ncp; 1516 struct rwlock *blps[2]; 1517 1518 blps[0] = HASH2BUCKETLOCK(hash); 1519 for (;;) { 1520 blps[1] = NULL; 1521 cache_lock_vnodes_cel(cel, dvp, vp); 1522 if (vp == NULL || vp->v_type != VDIR) 1523 break; 1524 ncp = vp->v_cache_dd; 1525 if (ncp == NULL) 1526 break; 1527 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 1528 break; 1529 MPASS(ncp->nc_dvp == vp); 1530 blps[1] = NCP2BUCKETLOCK(ncp); 1531 if (ncp->nc_flag & NCF_NEGATIVE) 1532 break; 1533 if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp)) 1534 break; 1535 /* 1536 * All vnodes got re-locked. Re-validate the state and if 1537 * nothing changed we are done. Otherwise restart. 1538 */ 1539 if (ncp == vp->v_cache_dd && 1540 (ncp->nc_flag & NCF_ISDOTDOT) != 0 && 1541 blps[1] == NCP2BUCKETLOCK(ncp) && 1542 VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2]) 1543 break; 1544 cache_unlock_vnodes_cel(cel); 1545 cel->vlp[0] = NULL; 1546 cel->vlp[1] = NULL; 1547 cel->vlp[2] = NULL; 1548 } 1549 cache_lock_buckets_cel(cel, blps[0], blps[1]); 1550 } 1551 1552 static void 1553 cache_enter_lock_dd(struct celockstate *cel, struct vnode *dvp, struct vnode *vp, 1554 uint32_t hash) 1555 { 1556 struct namecache *ncp; 1557 struct rwlock *blps[2]; 1558 1559 blps[0] = HASH2BUCKETLOCK(hash); 1560 for (;;) { 1561 blps[1] = NULL; 1562 cache_lock_vnodes_cel(cel, dvp, vp); 1563 ncp = dvp->v_cache_dd; 1564 if (ncp == NULL) 1565 break; 1566 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 1567 break; 1568 MPASS(ncp->nc_dvp == dvp); 1569 blps[1] = NCP2BUCKETLOCK(ncp); 1570 if (ncp->nc_flag & NCF_NEGATIVE) 1571 break; 1572 if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp)) 1573 break; 1574 if (ncp == dvp->v_cache_dd && 1575 (ncp->nc_flag & NCF_ISDOTDOT) != 0 && 1576 blps[1] == NCP2BUCKETLOCK(ncp) && 1577 VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2]) 1578 break; 1579 cache_unlock_vnodes_cel(cel); 1580 cel->vlp[0] = NULL; 1581 cel->vlp[1] = NULL; 1582 cel->vlp[2] = NULL; 1583 } 1584 cache_lock_buckets_cel(cel, blps[0], blps[1]); 1585 } 1586 1587 static void 1588 cache_enter_unlock(struct celockstate *cel) 1589 { 1590 1591 cache_unlock_buckets_cel(cel); 1592 cache_unlock_vnodes_cel(cel); 1593 } 1594 1595 /* 1596 * Add an entry to the cache. 
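 *
 * The locks affected by the insertion are taken via cache_enter_lock() (or
 * cache_enter_lock_dd() when a ".." entry is involved), a duplicate created
 * by a concurrent lookup is detected by rescanning the hash chain, and, if
 * the number of negative entries now exceeds numcache / ncnegfactor, one
 * negative entry is evicted on the way out.  A minimal sketch of a typical
 * call, assuming no timestamps are wanted:
 *
 *	cache_enter_time(dvp, vp, cnp, NULL, NULL);
 *
 * Passing vp == NULL records a negative entry for the name.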
1597 */ 1598 void 1599 cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, 1600 struct timespec *tsp, struct timespec *dtsp) 1601 { 1602 struct celockstate cel; 1603 struct namecache *ncp, *n2, *ndd; 1604 struct namecache_ts *ncp_ts, *n2_ts; 1605 struct nchashhead *ncpp; 1606 struct neglist *neglist; 1607 uint32_t hash; 1608 int flag; 1609 int len; 1610 bool neg_locked; 1611 int lnumcache; 1612 1613 CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr); 1614 VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp, 1615 ("cache_enter: Adding a doomed vnode")); 1616 VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp, 1617 ("cache_enter: Doomed vnode used as src")); 1618 1619 if (__predict_false(!doingcache)) 1620 return; 1621 1622 /* 1623 * Avoid blowout in namecache entries. 1624 */ 1625 if (__predict_false(numcache >= desiredvnodes * ncsizefactor)) 1626 return; 1627 1628 cache_celockstate_init(&cel); 1629 ndd = NULL; 1630 flag = 0; 1631 if (cnp->cn_nameptr[0] == '.') { 1632 if (cnp->cn_namelen == 1) 1633 return; 1634 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') { 1635 len = cnp->cn_namelen; 1636 hash = cache_get_hash(cnp->cn_nameptr, len, dvp); 1637 cache_enter_lock_dd(&cel, dvp, vp, hash); 1638 /* 1639 * If dotdot entry already exists, just retarget it 1640 * to new parent vnode, otherwise continue with new 1641 * namecache entry allocation. 1642 */ 1643 if ((ncp = dvp->v_cache_dd) != NULL && 1644 ncp->nc_flag & NCF_ISDOTDOT) { 1645 KASSERT(ncp->nc_dvp == dvp, 1646 ("wrong isdotdot parent")); 1647 neg_locked = false; 1648 if (ncp->nc_flag & NCF_NEGATIVE || vp == NULL) { 1649 neglist = NCP2NEGLIST(ncp); 1650 mtx_lock(&ncneg_hot.nl_lock); 1651 mtx_lock(&neglist->nl_lock); 1652 neg_locked = true; 1653 } 1654 if (!(ncp->nc_flag & NCF_NEGATIVE)) { 1655 TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, 1656 ncp, nc_dst); 1657 } else { 1658 cache_negative_remove(ncp, true); 1659 } 1660 if (vp != NULL) { 1661 TAILQ_INSERT_HEAD(&vp->v_cache_dst, 1662 ncp, nc_dst); 1663 ncp->nc_flag &= ~(NCF_NEGATIVE|NCF_HOTNEGATIVE); 1664 } else { 1665 ncp->nc_flag &= ~(NCF_HOTNEGATIVE); 1666 ncp->nc_flag |= NCF_NEGATIVE; 1667 cache_negative_insert(ncp, true); 1668 } 1669 if (neg_locked) { 1670 mtx_unlock(&neglist->nl_lock); 1671 mtx_unlock(&ncneg_hot.nl_lock); 1672 } 1673 ncp->nc_vp = vp; 1674 cache_enter_unlock(&cel); 1675 return; 1676 } 1677 dvp->v_cache_dd = NULL; 1678 cache_enter_unlock(&cel); 1679 cache_celockstate_init(&cel); 1680 SDT_PROBE3(vfs, namecache, enter, done, dvp, "..", vp); 1681 flag = NCF_ISDOTDOT; 1682 } 1683 } 1684 1685 /* 1686 * Calculate the hash key and setup as much of the new 1687 * namecache entry as possible before acquiring the lock. 1688 */ 1689 ncp = cache_alloc(cnp->cn_namelen, tsp != NULL); 1690 ncp->nc_flag = flag; 1691 ncp->nc_vp = vp; 1692 if (vp == NULL) 1693 ncp->nc_flag |= NCF_NEGATIVE; 1694 ncp->nc_dvp = dvp; 1695 if (tsp != NULL) { 1696 ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc); 1697 ncp_ts->nc_time = *tsp; 1698 ncp_ts->nc_ticks = ticks; 1699 ncp_ts->nc_nc.nc_flag |= NCF_TS; 1700 if (dtsp != NULL) { 1701 ncp_ts->nc_dotdottime = *dtsp; 1702 ncp_ts->nc_nc.nc_flag |= NCF_DTS; 1703 } 1704 } 1705 len = ncp->nc_nlen = cnp->cn_namelen; 1706 hash = cache_get_hash(cnp->cn_nameptr, len, dvp); 1707 strlcpy(ncp->nc_name, cnp->cn_nameptr, len + 1); 1708 cache_enter_lock(&cel, dvp, vp, hash); 1709 1710 /* 1711 * See if this vnode or negative entry is already in the cache 1712 * with this name. 
This can happen with concurrent lookups of 1713 * the same path name. 1714 */ 1715 ncpp = NCHHASH(hash); 1716 LIST_FOREACH(n2, ncpp, nc_hash) { 1717 if (n2->nc_dvp == dvp && 1718 n2->nc_nlen == cnp->cn_namelen && 1719 !bcmp(n2->nc_name, cnp->cn_nameptr, n2->nc_nlen)) { 1720 if (tsp != NULL) { 1721 KASSERT((n2->nc_flag & NCF_TS) != 0, 1722 ("no NCF_TS")); 1723 n2_ts = __containerof(n2, struct namecache_ts, nc_nc); 1724 n2_ts->nc_time = ncp_ts->nc_time; 1725 n2_ts->nc_ticks = ncp_ts->nc_ticks; 1726 if (dtsp != NULL) { 1727 n2_ts->nc_dotdottime = ncp_ts->nc_dotdottime; 1728 if (ncp->nc_flag & NCF_NEGATIVE) 1729 mtx_lock(&ncneg_hot.nl_lock); 1730 n2_ts->nc_nc.nc_flag |= NCF_DTS; 1731 if (ncp->nc_flag & NCF_NEGATIVE) 1732 mtx_unlock(&ncneg_hot.nl_lock); 1733 } 1734 } 1735 goto out_unlock_free; 1736 } 1737 } 1738 1739 if (flag == NCF_ISDOTDOT) { 1740 /* 1741 * See if we are trying to add .. entry, but some other lookup 1742 * has populated v_cache_dd pointer already. 1743 */ 1744 if (dvp->v_cache_dd != NULL) 1745 goto out_unlock_free; 1746 KASSERT(vp == NULL || vp->v_type == VDIR, 1747 ("wrong vnode type %p", vp)); 1748 dvp->v_cache_dd = ncp; 1749 } 1750 1751 if (vp != NULL) { 1752 if (vp->v_type == VDIR) { 1753 if (flag != NCF_ISDOTDOT) { 1754 /* 1755 * For this case, the cache entry maps both the 1756 * directory name in it and the name ".." for the 1757 * directory's parent. 1758 */ 1759 if ((ndd = vp->v_cache_dd) != NULL) { 1760 if ((ndd->nc_flag & NCF_ISDOTDOT) != 0) 1761 cache_zap_locked(ndd, false); 1762 else 1763 ndd = NULL; 1764 } 1765 vp->v_cache_dd = ncp; 1766 } 1767 } else { 1768 vp->v_cache_dd = NULL; 1769 } 1770 } 1771 1772 if (flag != NCF_ISDOTDOT) { 1773 if (LIST_EMPTY(&dvp->v_cache_src)) { 1774 vhold(dvp); 1775 atomic_add_rel_long(&numcachehv, 1); 1776 } 1777 LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src); 1778 } 1779 1780 /* 1781 * Insert the new namecache entry into the appropriate chain 1782 * within the cache entries table. 1783 */ 1784 LIST_INSERT_HEAD(ncpp, ncp, nc_hash); 1785 1786 /* 1787 * If the entry is "negative", we place it into the 1788 * "negative" cache queue, otherwise, we place it into the 1789 * destination vnode's cache entries queue. 
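	 *
	 * Negative entries start out on one of the per-hash "cold" neglists
	 * and are promoted to the shared ncneg_hot list only once they see
	 * hits (see cache_negative_hit() above); entries recording whiteouts
	 * additionally carry NCF_WHITE so later lookups can set ISWHITEOUT.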
1790 */ 1791 if (vp != NULL) { 1792 TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst); 1793 SDT_PROBE3(vfs, namecache, enter, done, dvp, ncp->nc_name, 1794 vp); 1795 } else { 1796 if (cnp->cn_flags & ISWHITEOUT) 1797 ncp->nc_flag |= NCF_WHITE; 1798 cache_negative_insert(ncp, false); 1799 SDT_PROBE2(vfs, namecache, enter_negative, done, dvp, 1800 ncp->nc_name); 1801 } 1802 cache_enter_unlock(&cel); 1803 lnumcache = atomic_fetchadd_long(&numcache, 1) + 1; 1804 if (numneg * ncnegfactor > lnumcache) 1805 cache_negative_zap_one(); 1806 cache_free(ndd); 1807 return; 1808 out_unlock_free: 1809 cache_enter_unlock(&cel); 1810 cache_free(ncp); 1811 return; 1812 } 1813 1814 static u_int 1815 cache_roundup_2(u_int val) 1816 { 1817 u_int res; 1818 1819 for (res = 1; res <= val; res <<= 1) 1820 continue; 1821 1822 return (res); 1823 } 1824 1825 /* 1826 * Name cache initialization, from vfs_init() when we are booting 1827 */ 1828 static void 1829 nchinit(void *dummy __unused) 1830 { 1831 u_int i; 1832 1833 cache_zone_small = uma_zcreate("S VFS Cache", 1834 sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1, 1835 NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache), 1836 UMA_ZONE_ZINIT); 1837 cache_zone_small_ts = uma_zcreate("STS VFS Cache", 1838 sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1, 1839 NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache_ts), 1840 UMA_ZONE_ZINIT); 1841 cache_zone_large = uma_zcreate("L VFS Cache", 1842 sizeof(struct namecache) + NAME_MAX + 1, 1843 NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache), 1844 UMA_ZONE_ZINIT); 1845 cache_zone_large_ts = uma_zcreate("LTS VFS Cache", 1846 sizeof(struct namecache_ts) + NAME_MAX + 1, 1847 NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache_ts), 1848 UMA_ZONE_ZINIT); 1849 1850 nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash); 1851 ncbuckethash = cache_roundup_2(mp_ncpus * 64) - 1; 1852 if (ncbuckethash > nchash) 1853 ncbuckethash = nchash; 1854 bucketlocks = malloc(sizeof(*bucketlocks) * numbucketlocks, M_VFSCACHE, 1855 M_WAITOK | M_ZERO); 1856 for (i = 0; i < numbucketlocks; i++) 1857 rw_init_flags(&bucketlocks[i], "ncbuc", RW_DUPOK | RW_RECURSE); 1858 ncvnodehash = cache_roundup_2(mp_ncpus * 64) - 1; 1859 vnodelocks = malloc(sizeof(*vnodelocks) * numvnodelocks, M_VFSCACHE, 1860 M_WAITOK | M_ZERO); 1861 for (i = 0; i < numvnodelocks; i++) 1862 mtx_init(&vnodelocks[i], "ncvn", NULL, MTX_DUPOK | MTX_RECURSE); 1863 ncpurgeminvnodes = numbucketlocks; 1864 1865 ncneghash = 3; 1866 neglists = malloc(sizeof(*neglists) * numneglists, M_VFSCACHE, 1867 M_WAITOK | M_ZERO); 1868 for (i = 0; i < numneglists; i++) { 1869 mtx_init(&neglists[i].nl_lock, "ncnegl", NULL, MTX_DEF); 1870 TAILQ_INIT(&neglists[i].nl_list); 1871 } 1872 mtx_init(&ncneg_hot.nl_lock, "ncneglh", NULL, MTX_DEF); 1873 TAILQ_INIT(&ncneg_hot.nl_list); 1874 1875 mtx_init(&ncneg_shrink_lock, "ncnegs", NULL, MTX_DEF); 1876 1877 numcalls = counter_u64_alloc(M_WAITOK); 1878 dothits = counter_u64_alloc(M_WAITOK); 1879 dotdothits = counter_u64_alloc(M_WAITOK); 1880 numchecks = counter_u64_alloc(M_WAITOK); 1881 nummiss = counter_u64_alloc(M_WAITOK); 1882 nummisszap = counter_u64_alloc(M_WAITOK); 1883 numposzaps = counter_u64_alloc(M_WAITOK); 1884 numposhits = counter_u64_alloc(M_WAITOK); 1885 numnegzaps = counter_u64_alloc(M_WAITOK); 1886 numneghits = counter_u64_alloc(M_WAITOK); 1887 numfullpathcalls = counter_u64_alloc(M_WAITOK); 1888 numfullpathfail1 = counter_u64_alloc(M_WAITOK); 1889 numfullpathfail2 = counter_u64_alloc(M_WAITOK); 1890 numfullpathfail4 = 
counter_u64_alloc(M_WAITOK); 1891 numfullpathfound = counter_u64_alloc(M_WAITOK); 1892 } 1893 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL); 1894 1895 void 1896 cache_changesize(int newmaxvnodes) 1897 { 1898 struct nchashhead *new_nchashtbl, *old_nchashtbl; 1899 u_long new_nchash, old_nchash; 1900 struct namecache *ncp; 1901 uint32_t hash; 1902 int i; 1903 1904 newmaxvnodes = cache_roundup_2(newmaxvnodes * 2); 1905 if (newmaxvnodes < numbucketlocks) 1906 newmaxvnodes = numbucketlocks; 1907 1908 new_nchashtbl = hashinit(newmaxvnodes, M_VFSCACHE, &new_nchash); 1909 /* If same hash table size, nothing to do */ 1910 if (nchash == new_nchash) { 1911 free(new_nchashtbl, M_VFSCACHE); 1912 return; 1913 } 1914 /* 1915 * Move everything from the old hash table to the new table. 1916 * None of the namecache entries in the table can be removed 1917 * because to do so, they have to be removed from the hash table. 1918 */ 1919 cache_lock_all_vnodes(); 1920 cache_lock_all_buckets(); 1921 old_nchashtbl = nchashtbl; 1922 old_nchash = nchash; 1923 nchashtbl = new_nchashtbl; 1924 nchash = new_nchash; 1925 for (i = 0; i <= old_nchash; i++) { 1926 while ((ncp = LIST_FIRST(&old_nchashtbl[i])) != NULL) { 1927 hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, 1928 ncp->nc_dvp); 1929 LIST_REMOVE(ncp, nc_hash); 1930 LIST_INSERT_HEAD(NCHHASH(hash), ncp, nc_hash); 1931 } 1932 } 1933 cache_unlock_all_buckets(); 1934 cache_unlock_all_vnodes(); 1935 free(old_nchashtbl, M_VFSCACHE); 1936 } 1937 1938 /* 1939 * Invalidate all entries to a particular vnode. 1940 */ 1941 void 1942 cache_purge(struct vnode *vp) 1943 { 1944 TAILQ_HEAD(, namecache) ncps; 1945 struct namecache *ncp, *nnp; 1946 struct mtx *vlp, *vlp2; 1947 1948 CTR1(KTR_VFS, "cache_purge(%p)", vp); 1949 SDT_PROBE1(vfs, namecache, purge, done, vp); 1950 if (LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) && 1951 vp->v_cache_dd == NULL) 1952 return; 1953 TAILQ_INIT(&ncps); 1954 vlp = VP2VNODELOCK(vp); 1955 vlp2 = NULL; 1956 mtx_lock(vlp); 1957 retry: 1958 while (!LIST_EMPTY(&vp->v_cache_src)) { 1959 ncp = LIST_FIRST(&vp->v_cache_src); 1960 if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2)) 1961 goto retry; 1962 TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst); 1963 } 1964 while (!TAILQ_EMPTY(&vp->v_cache_dst)) { 1965 ncp = TAILQ_FIRST(&vp->v_cache_dst); 1966 if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2)) 1967 goto retry; 1968 TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst); 1969 } 1970 ncp = vp->v_cache_dd; 1971 if (ncp != NULL) { 1972 KASSERT(ncp->nc_flag & NCF_ISDOTDOT, 1973 ("lost dotdot link")); 1974 if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2)) 1975 goto retry; 1976 TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst); 1977 } 1978 KASSERT(vp->v_cache_dd == NULL, ("incomplete purge")); 1979 mtx_unlock(vlp); 1980 if (vlp2 != NULL) 1981 mtx_unlock(vlp2); 1982 TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) { 1983 cache_free(ncp); 1984 } 1985 } 1986 1987 /* 1988 * Invalidate all negative entries for a particular directory vnode. 
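 *
 * Only entries on the directory's v_cache_src list with NCF_NEGATIVE set
 * are zapped; they are batched on a local list and freed only after the
 * vnode lock has been dropped.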

/*
 * Invalidate all negative entries for a particular directory vnode.
 */
void
cache_purge_negative(struct vnode *vp)
{
	TAILQ_HEAD(, namecache) ncps;
	struct namecache *ncp, *nnp;
	struct mtx *vlp;

	CTR1(KTR_VFS, "cache_purge_negative(%p)", vp);
	SDT_PROBE1(vfs, namecache, purge_negative, done, vp);
	if (LIST_EMPTY(&vp->v_cache_src))
		return;
	TAILQ_INIT(&ncps);
	vlp = VP2VNODELOCK(vp);
	mtx_lock(vlp);
	LIST_FOREACH_SAFE(ncp, &vp->v_cache_src, nc_src, nnp) {
		if (!(ncp->nc_flag & NCF_NEGATIVE))
			continue;
		cache_zap_negative_locked_vnode_kl(ncp, vp);
		TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst);
	}
	mtx_unlock(vlp);
	TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
		cache_free(ncp);
	}
}

/*
 * Flush all entries referencing a particular filesystem.
 */
void
cache_purgevfs(struct mount *mp, bool force)
{
	TAILQ_HEAD(, namecache) ncps;
	struct mtx *vlp1, *vlp2;
	struct rwlock *blp;
	struct nchashhead *bucket;
	struct namecache *ncp, *nnp;
	u_long i, j, n_nchash;
	int error;

	/* Scan hash tables for applicable entries */
	SDT_PROBE1(vfs, namecache, purgevfs, done, mp);
	if (!force && mp->mnt_nvnodelistsize <= ncpurgeminvnodes)
		return;
	TAILQ_INIT(&ncps);
	n_nchash = nchash + 1;
	vlp1 = vlp2 = NULL;
	for (i = 0; i < numbucketlocks; i++) {
		blp = (struct rwlock *)&bucketlocks[i];
		rw_wlock(blp);
		for (j = i; j < n_nchash; j += numbucketlocks) {
retry:
			bucket = &nchashtbl[j];
			LIST_FOREACH_SAFE(ncp, bucket, nc_hash, nnp) {
				cache_assert_bucket_locked(ncp, RA_WLOCKED);
				if (ncp->nc_dvp->v_mount != mp)
					continue;
				error = cache_zap_wlocked_bucket_kl(ncp, blp,
				    &vlp1, &vlp2);
				if (error != 0)
					goto retry;
				TAILQ_INSERT_HEAD(&ncps, ncp, nc_dst);
			}
		}
		rw_wunlock(blp);
		if (vlp1 == NULL && vlp2 == NULL)
			cache_maybe_yield();
	}
	if (vlp1 != NULL)
		mtx_unlock(vlp1);
	if (vlp2 != NULL)
		mtx_unlock(vlp2);

	TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) {
		cache_free(ncp);
	}
}

/*
 * Perform canonical checks and a cache lookup, and pass the request on to the
 * filesystem through VOP_CACHEDLOOKUP() only if needed.
 */

int
vfs_cache_lookup(struct vop_lookup_args *ap)
{
	struct vnode *dvp;
	int error;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	int flags = cnp->cn_flags;
	struct thread *td = cnp->cn_thread;

	*vpp = NULL;
	dvp = ap->a_dvp;

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	error = VOP_ACCESS(dvp, VEXEC, cred, td);
	if (error)
		return (error);

	/*
	 * cache_lookup() returns -1 on a positive hit (*vpp is set), ENOENT
	 * on a cached negative entry and 0 on a miss, in which case the
	 * filesystem's VOP_CACHEDLOOKUP() does the real work.
	 */
	error = cache_lookup(dvp, vpp, cnp, NULL, NULL);
	if (error == 0)
		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
	if (error == -1)
		return (0);
	return (error);
}
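
/*
 * Example (illustrative sketch, not compiled into this file): a filesystem
 * opts into the name cache by pointing vop_lookup at vfs_cache_lookup() and
 * supplying its real lookup routine as vop_cachedlookup, the way ufs wires
 * up ufs_lookup().  The "examplefs" names are hypothetical.
 */
#if 0
static struct vop_vector examplefs_vnodeops = {
	.vop_default =		&default_vnodeops,
	/* Consult the name cache first ... */
	.vop_lookup =		vfs_cache_lookup,
	/* ... and fall back to the real lookup only on a miss. */
	.vop_cachedlookup =	examplefs_cachedlookup,	/* hypothetical */
};
#endif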

/*
 * XXX All of these sysctls would probably be more productive dead.
 */
static int __read_mostly disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
    "Disable the getcwd syscall");

/* Implementation of the getcwd syscall. */
int
sys___getcwd(struct thread *td, struct __getcwd_args *uap)
{

	return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen,
	    MAXPATHLEN));
}

int
kern___getcwd(struct thread *td, char *buf, enum uio_seg bufseg, size_t buflen,
    size_t path_max)
{
	char *bp, *tmpbuf;
	struct filedesc *fdp;
	struct vnode *cdir, *rdir;
	int error;

	if (__predict_false(disablecwd))
		return (ENODEV);
	if (__predict_false(buflen < 2))
		return (EINVAL);
	if (buflen > path_max)
		buflen = path_max;

	tmpbuf = malloc(buflen, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	FILEDESC_SLOCK(fdp);
	cdir = fdp->fd_cdir;
	vrefact(cdir);
	rdir = fdp->fd_rdir;
	vrefact(rdir);
	FILEDESC_SUNLOCK(fdp);
	error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen);
	vrele(rdir);
	vrele(cdir);

	if (!error) {
		if (bufseg == UIO_SYSSPACE)
			bcopy(bp, buf, strlen(bp) + 1);
		else
			error = copyout(bp, buf, strlen(bp) + 1);
#ifdef KTRACE
		if (KTRPOINT(curthread, KTR_NAMEI))
			ktrnamei(bp);
#endif
	}
	free(tmpbuf, M_TEMP);
	return (error);
}

/*
 * Thus begins the fullpath magic.
 */

static int __read_mostly disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
    "Disable the vn_fullpath function");

/*
 * Retrieve the full filesystem path that corresponds to a vnode from the name
 * cache (if available).
 */
int
vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
{
	char *buf;
	struct filedesc *fdp;
	struct vnode *rdir;
	int error;

	if (__predict_false(disablefullpath))
		return (ENODEV);
	if (__predict_false(vn == NULL))
		return (EINVAL);

	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	fdp = td->td_proc->p_fd;
	FILEDESC_SLOCK(fdp);
	rdir = fdp->fd_rdir;
	vrefact(rdir);
	FILEDESC_SUNLOCK(fdp);
	error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN);
	vrele(rdir);

	if (!error)
		*freebuf = buf;
	else
		free(buf, M_TEMP);
	return (error);
}
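
/*
 * Example (illustrative sketch, not compiled into this file): the usual
 * calling convention for vn_fullpath().  On success *retbuf points into a
 * buffer owned by *freebuf, which the caller releases with free(9) and
 * M_TEMP.  "example_report_path" is hypothetical.
 */
#if 0
static void
example_report_path(struct thread *td, struct vnode *vp)
{
	char *fullpath, *freepath;

	fullpath = "-";
	freepath = NULL;
	/* On failure fullpath and freepath are left untouched. */
	(void)vn_fullpath(td, vp, &fullpath, &freepath);
	printf("vnode %p resolves to %s\n", vp, fullpath);
	if (freepath != NULL)
		free(freepath, M_TEMP);
}
#endif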

/*
 * This function is similar to vn_fullpath, but it attempts to look up the
 * pathname relative to the global root mount point.  This is required for the
 * auditing sub-system, as audited pathnames must be absolute, relative to the
 * global root mount point.
 */
int
vn_fullpath_global(struct thread *td, struct vnode *vn,
    char **retbuf, char **freebuf)
{
	char *buf;
	int error;

	if (__predict_false(disablefullpath))
		return (ENODEV);
	if (__predict_false(vn == NULL))
		return (EINVAL);
	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN);
	if (!error)
		*freebuf = buf;
	else
		free(buf, M_TEMP);
	return (error);
}

/*
 * Resolve one pathname component for a vnode: on success the name is copied
 * to the tail of buf (*buflen is decremented by its length) and *vp is
 * replaced with a parent vnode holding a reference of its own.  The caller's
 * reference on the original vnode is always consumed.
 */
int
vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen)
{
	struct vnode *dvp;
	struct namecache *ncp;
	struct mtx *vlp;
	int error;

	vlp = VP2VNODELOCK(*vp);
	mtx_lock(vlp);
	TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) {
		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
			break;
	}
	if (ncp != NULL) {
		if (*buflen < ncp->nc_nlen) {
			mtx_unlock(vlp);
			vrele(*vp);
			counter_u64_add(numfullpathfail4, 1);
			error = ENOMEM;
			SDT_PROBE3(vfs, namecache, fullpath, return, error,
			    vp, NULL);
			return (error);
		}
		*buflen -= ncp->nc_nlen;
		memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen);
		SDT_PROBE3(vfs, namecache, fullpath, hit, ncp->nc_dvp,
		    ncp->nc_name, vp);
		dvp = *vp;
		*vp = ncp->nc_dvp;
		vref(*vp);
		mtx_unlock(vlp);
		vrele(dvp);
		return (0);
	}
	SDT_PROBE1(vfs, namecache, fullpath, miss, vp);

	mtx_unlock(vlp);
	vn_lock(*vp, LK_SHARED | LK_RETRY);
	error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen);
	vput(*vp);
	if (error) {
		counter_u64_add(numfullpathfail2, 1);
		SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
		return (error);
	}

	*vp = dvp;
	if (dvp->v_iflag & VI_DOOMED) {
		/* forced unmount */
		vrele(dvp);
		error = ENOENT;
		SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL);
		return (error);
	}
	/*
	 * *vp has its use count incremented still.
	 */

	return (0);
}
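
/*
 * Example (illustrative sketch, not compiled into this file): a single
 * upward step with vn_vptocnp().  The caller supplies a referenced vnode,
 * which the function consumes, and reads the component back from the tail
 * of the buffer.  "example_parent_name" is hypothetical.
 */
#if 0
static void
example_parent_name(struct thread *td, struct vnode *vp)
{
	struct vnode *pvp;
	char *buf;
	u_int buflen;

	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	buflen = MAXPATHLEN - 1;
	buf[buflen] = '\0';
	pvp = vp;
	vref(pvp);		/* vn_vptocnp() consumes this reference */
	if (vn_vptocnp(&pvp, td->td_ucred, buf, &buflen) == 0) {
		/* The component now sits at the tail of the buffer. */
		printf("%p is named \"%s\" in its parent %p\n", vp,
		    buf + buflen, pvp);
		vrele(pvp);	/* the parent came back referenced */
	}
	free(buf, M_TEMP);
}
#endif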

/*
 * The magic behind kern___getcwd() and vn_fullpath().
 */
static int
vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen)
{
	int error, slash_prefixed;
#ifdef KDTRACE_HOOKS
	struct vnode *startvp = vp;
#endif
	struct vnode *vp1;

	buflen--;
	buf[buflen] = '\0';
	error = 0;
	slash_prefixed = 0;

	SDT_PROBE1(vfs, namecache, fullpath, entry, vp);
	counter_u64_add(numfullpathcalls, 1);
	vref(vp);
	if (vp->v_type != VDIR) {
		error = vn_vptocnp(&vp, td->td_ucred, buf, &buflen);
		if (error)
			return (error);
		if (buflen == 0) {
			vrele(vp);
			return (ENOMEM);
		}
		buf[--buflen] = '/';
		slash_prefixed = 1;
	}
	while (vp != rdir && vp != rootvnode) {
		/*
		 * The vp vnode must be already fully constructed,
		 * since it is either found in namecache or obtained
		 * from VOP_VPTOCNP().  We may test for VV_ROOT safely
		 * without obtaining the vnode lock.
		 */
		if ((vp->v_vflag & VV_ROOT) != 0) {
			vn_lock(vp, LK_RETRY | LK_SHARED);

			/*
			 * With the vnode locked, check for races with
			 * unmount, forced or not.  Note that we
			 * already verified that vp is not equal to
			 * the root vnode, which means that
			 * mnt_vnodecovered can be NULL only for the
			 * case of unmount.
			 */
			if ((vp->v_iflag & VI_DOOMED) != 0 ||
			    (vp1 = vp->v_mount->mnt_vnodecovered) == NULL ||
			    vp1->v_mountedhere != vp->v_mount) {
				vput(vp);
				error = ENOENT;
				SDT_PROBE3(vfs, namecache, fullpath, return,
				    error, vp, NULL);
				break;
			}

			vref(vp1);
			vput(vp);
			vp = vp1;
			continue;
		}
		if (vp->v_type != VDIR) {
			vrele(vp);
			counter_u64_add(numfullpathfail1, 1);
			error = ENOTDIR;
			SDT_PROBE3(vfs, namecache, fullpath, return,
			    error, vp, NULL);
			break;
		}
		error = vn_vptocnp(&vp, td->td_ucred, buf, &buflen);
		if (error)
			break;
		if (buflen == 0) {
			vrele(vp);
			error = ENOMEM;
			SDT_PROBE3(vfs, namecache, fullpath, return, error,
			    startvp, NULL);
			break;
		}
		buf[--buflen] = '/';
		slash_prefixed = 1;
	}
	if (error)
		return (error);
	if (!slash_prefixed) {
		if (buflen == 0) {
			vrele(vp);
			counter_u64_add(numfullpathfail4, 1);
			SDT_PROBE3(vfs, namecache, fullpath, return, ENOMEM,
			    startvp, NULL);
			return (ENOMEM);
		}
		buf[--buflen] = '/';
	}
	counter_u64_add(numfullpathfound, 1);
	vrele(vp);

	SDT_PROBE3(vfs, namecache, fullpath, return, 0, startvp, buf + buflen);
	*retbuf = buf + buflen;
	return (0);
}

struct vnode *
vn_dir_dd_ino(struct vnode *vp)
{
	struct namecache *ncp;
	struct vnode *ddvp;
	struct mtx *vlp;

	ASSERT_VOP_LOCKED(vp, "vn_dir_dd_ino");
	vlp = VP2VNODELOCK(vp);
	mtx_lock(vlp);
	TAILQ_FOREACH(ncp, &(vp->v_cache_dst), nc_dst) {
		if ((ncp->nc_flag & NCF_ISDOTDOT) != 0)
			continue;
		ddvp = ncp->nc_dvp;
		vhold(ddvp);
		mtx_unlock(vlp);
		if (vget(ddvp, LK_SHARED | LK_NOWAIT | LK_VNHELD, curthread))
			return (NULL);
		return (ddvp);
	}
	mtx_unlock(vlp);
	return (NULL);
}

int
vn_commname(struct vnode *vp, char *buf, u_int buflen)
{
	struct namecache *ncp;
	struct mtx *vlp;
	int l;

	vlp = VP2VNODELOCK(vp);
	mtx_lock(vlp);
	TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst)
		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
			break;
	if (ncp == NULL) {
		mtx_unlock(vlp);
		return (ENOENT);
	}
	l = min(ncp->nc_nlen, buflen - 1);
	memcpy(buf, ncp->nc_name, l);
	mtx_unlock(vlp);
	buf[l] = '\0';
	return (0);
}

/*
 * ABI compat shims for old kernel modules.  <sys/vnode.h> maps cache_enter()
 * to cache_enter_time() with a macro; undefine it here so the old symbol is
 * still emitted for modules built against the previous interface.
 */
#undef cache_enter

void	cache_enter(struct vnode *dvp, struct vnode *vp,
	    struct componentname *cnp);

void
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{

	cache_enter_time(dvp, vp, cnp, NULL, NULL);
}
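
/*
 * Example (illustrative sketch, not compiled into this file): new code can
 * simply use the cache_enter() macro from <sys/vnode.h>, which expands to
 * cache_enter_time() with NULL timestamps; a filesystem that tracks
 * attribute times, such as an NFS-like client, may pass them explicitly so
 * that cache_lookup() callers can judge an entry's age.  The "examplefs"
 * names and the "va" attributes are hypothetical.
 */
#if 0
static void
examplefs_postlookup(struct vnode *dvp, struct vnode *vp,
    struct componentname *cnp, struct vattr *va)
{

	if ((cnp->cn_flags & MAKEENTRY) == 0)
		return;
	/* Record the attribute change time along with the name itself. */
	cache_enter_time(dvp, vp, cnp, &va->va_ctime, NULL);
}
#endif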

/*
 * This function updates the path string to the vnode's full global path and
 * checks the size of the new path string against the pathlen argument.
 *
 * Requires a locked, referenced vnode.
 * The vnode is returned locked on success or ENODEV; otherwise it is
 * unlocked.
 *
 * If the sysctl debug.disablefullpath is set, ENODEV is returned, the vnode
 * is left locked and the path remains untouched.
 *
 * If vp is a directory, the call to vn_fullpath_global() always succeeds
 * because it falls back to the ".." lookup if the namecache lookup fails.
 */
int
vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path,
    u_int pathlen)
{
	struct nameidata nd;
	struct vnode *vp1;
	char *rpath, *fbuf;
	int error;

	ASSERT_VOP_ELOCKED(vp, __func__);

	/* Return ENODEV if sysctl debug.disablefullpath==1 */
	if (__predict_false(disablefullpath))
		return (ENODEV);

	/* Construct global filesystem path from vp. */
	VOP_UNLOCK(vp, 0);
	error = vn_fullpath_global(td, vp, &rpath, &fbuf);

	if (error != 0) {
		vrele(vp);
		return (error);
	}

	if (strlen(rpath) >= pathlen) {
		vrele(vp);
		error = ENAMETOOLONG;
		goto out;
	}

	/*
	 * Re-lookup the vnode by path to detect a possible rename.
	 * As a side effect, the vnode is relocked.
	 * If the vnode was renamed, return ENOENT.
	 */
	NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1,
	    UIO_SYSSPACE, path, td);
	error = namei(&nd);
	if (error != 0) {
		vrele(vp);
		goto out;
	}
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vp1 = nd.ni_vp;
	vrele(vp);
	if (vp1 == vp)
		strcpy(path, rpath);
	else {
		vput(vp1);
		error = ENOENT;
	}

out:
	free(fbuf, M_TEMP);
	return (error);
}
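
/*
 * Example (illustrative sketch, not compiled into this file): rewriting a
 * caller-supplied path buffer in place so that it is absolute with respect
 * to the global root, similar in spirit to how the mount code canonicalizes
 * the mount point path.  "example_canon_path" is hypothetical; note that on
 * ENODEV (debug.disablefullpath set) the original path is still usable and
 * the vnode stays locked.
 */
#if 0
static int
example_canon_path(struct thread *td, struct vnode *vp, char *pathbuf,
    u_int pathlen)
{
	int error;

	/* vp must be exclusively locked and referenced on entry. */
	error = vn_path_to_global_path(td, vp, pathbuf, pathlen);
	if (error == ENODEV)
		error = 0;	/* keep the caller-supplied path as-is */
	return (error);
}
#endif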