/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/fcntl.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/vnode.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <vm/uma.h>

SDT_PROVIDER_DECLARE(vfs);
SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *",
    "char *", "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *");
SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int",
    "struct vnode *", "char *");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit__negative,
    "struct vnode *", "char *");
SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE1(vfs, namecache, purge, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *");
SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, zap_negative, done, "struct vnode *",
    "char *");
SDT_PROBE_DEFINE2(vfs, namecache, shrink_negative, done, "struct vnode *",
    "char *");

/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 */

struct namecache {
	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
	struct vnode *nc_dvp;		/* vnode of parent of name */
	union {
		struct vnode *nu_vp;	/* vnode the name refers to */
	} n_un;
	u_char	nc_flag;		/* flag bits */
	u_char	nc_nlen;		/* length of name */
	char	nc_name[0];		/* segment name + nul */
};

/*
 * struct namecache_ts repeats struct namecache layout up to the
 * nc_nlen member.
 * struct namecache_ts is used in place of struct namecache when time(s) need
 * to be stored.  The nc_dotdottime field is used when a cache entry is mapping
 * both a non-dotdot directory name plus dotdot for the directory's
 * parent.
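 *
 * NCF_TS in nc_flag marks an entry as having this layout; NCF_DTS
 * additionally marks nc_dotdottime as valid.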
 */
struct namecache_ts {
	struct timespec	nc_time;	/* timespec provided by fs */
	struct timespec	nc_dotdottime;	/* dotdot timespec provided by fs */
	int	nc_ticks;		/* ticks value when entry was added */
	struct namecache nc_nc;
};

#define	nc_vp		n_un.nu_vp

/*
 * Flags in namecache.nc_flag
 */
#define	NCF_WHITE	0x01
#define	NCF_ISDOTDOT	0x02
#define	NCF_TS		0x04
#define	NCF_DTS		0x08
#define	NCF_DVDROP	0x10
#define	NCF_NEGATIVE	0x20
#define	NCF_HOTNEGATIVE	0x40

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  Cache is indexed by hash value
 * obtained from (dvp, name) where dvp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry, (i.e. for a name that is known NOT to
 * exist) the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 *
 * These locks are used (in the order in which they can be taken):
 * NAME		TYPE	ROLE
 * vnodelock	mtx	vnode lists and v_cache_dd field protection
 * bucketlock	rwlock	for access to given set of hash buckets
 * neglist	mtx	negative entry LRU management
 *
 * Additionally, ncneg_shrink_lock mtx is used to have at most one thread
 * shrinking the LRU list.
 *
 * It is legal to take multiple vnodelock and bucketlock locks. The locking
 * order is lower address first. Both are recursive.
 *
 * "." lookups are lockless.
 *
 * ".." and vnode -> name lookups require vnodelock.
 *
 * name -> vnode lookup requires the relevant bucketlock to be held for reading.
 *
 * Insertions and removals of entries require involved vnodes and bucketlocks
 * to be write-locked to prevent other threads from seeing the entry.
 *
 * Some lookups result in removal of the found entry (e.g. getting rid of a
 * negative entry with the intent to create a positive one), which poses a
 * problem when multiple threads reach the state.  Similarly, two different
 * threads can purge two different vnodes and try to remove the same name.
 *
 * If the already held vnode lock is lower than the second required lock, we
 * can just take the other lock.  However, in the opposite case, this could
 * deadlock.  As such, this is resolved by trylocking and if that fails unlocking
 * the first node, locking everything in order and revalidating the state.
 */

/*
 * Structures associated with name caching.
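 *
 * The hash table itself (nchashtbl) is sized from desiredvnodes at boot
 * time (see nchinit()) and indexed with NCHHASH().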
 */
#define	NCHHASH(hash) \
	(&nchashtbl[(hash) & nchash])
static __read_mostly LIST_HEAD(nchashhead, namecache) *nchashtbl;/* Hash Table */
static u_long __read_mostly	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
    "Size of namecache hash table");
static u_long __read_mostly	ncnegfactor = 5;	/* ratio of negative entries */
SYSCTL_ULONG(_vfs, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
    "Ratio of negative namecache entries");
static u_long __exclusive_cache_line	numneg;	/* number of negative entries allocated */
static u_long __exclusive_cache_line	numcache;/* number of cache entries allocated */
u_int ncsizefactor = 2;
SYSCTL_UINT(_vfs, OID_AUTO, ncsizefactor, CTLFLAG_RW, &ncsizefactor, 0,
    "Size factor for namecache");
static u_int __read_mostly	ncpurgeminvnodes;
SYSCTL_UINT(_vfs, OID_AUTO, ncpurgeminvnodes, CTLFLAG_RW, &ncpurgeminvnodes, 0,
    "Number of vnodes below which purgevfs ignores the request");
static u_int __read_mostly	ncsize; /* the size as computed on creation or resizing */

struct nchstats	nchstats;		/* cache effectiveness statistics */

static struct mtx __exclusive_cache_line	ncneg_shrink_lock;
static int	shrink_list_turn;

struct neglist {
	struct mtx		nl_lock;
	TAILQ_HEAD(, namecache) nl_list;
} __aligned(CACHE_LINE_SIZE);

static struct neglist __read_mostly	*neglists;
static struct neglist ncneg_hot;
static u_long numhotneg;

#define	numneglists (ncneghash + 1)
static u_int __read_mostly	ncneghash;
static inline struct neglist *
NCP2NEGLIST(struct namecache *ncp)
{

	return (&neglists[(((uintptr_t)(ncp) >> 8) & ncneghash)]);
}

#define	numbucketlocks (ncbuckethash + 1)
static u_int __read_mostly  ncbuckethash;
static struct rwlock_padalign __read_mostly  *bucketlocks;
#define	HASH2BUCKETLOCK(hash) \
	((struct rwlock *)(&bucketlocks[((hash) & ncbuckethash)]))

#define	numvnodelocks (ncvnodehash + 1)
static u_int __read_mostly  ncvnodehash;
static struct mtx __read_mostly  *vnodelocks;
static inline struct mtx *
VP2VNODELOCK(struct vnode *vp)
{

	return (&vnodelocks[(((uintptr_t)(vp) >> 8) & ncvnodehash)]);
}

/*
 * UMA zones for the VFS cache.
 *
 * The small cache is used for entries with short names, which are the
 * most common.  The large cache is used for entries which are too big to
 * fit in the small cache.
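 *
 * A name of at most CACHE_PATH_CUTOFF (35) characters fits in the small
 * zones; longer names go to the large zones, which are sized for NAME_MAX.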
 */
static uma_zone_t __read_mostly cache_zone_small;
static uma_zone_t __read_mostly cache_zone_small_ts;
static uma_zone_t __read_mostly cache_zone_large;
static uma_zone_t __read_mostly cache_zone_large_ts;

#define	CACHE_PATH_CUTOFF	35

static struct namecache *
cache_alloc(int len, int ts)
{
	struct namecache_ts *ncp_ts;
	struct namecache *ncp;

	if (__predict_false(ts)) {
		if (len <= CACHE_PATH_CUTOFF)
			ncp_ts = uma_zalloc(cache_zone_small_ts, M_WAITOK);
		else
			ncp_ts = uma_zalloc(cache_zone_large_ts, M_WAITOK);
		ncp = &ncp_ts->nc_nc;
	} else {
		if (len <= CACHE_PATH_CUTOFF)
			ncp = uma_zalloc(cache_zone_small, M_WAITOK);
		else
			ncp = uma_zalloc(cache_zone_large, M_WAITOK);
	}
	return (ncp);
}

static void
cache_free(struct namecache *ncp)
{
	struct namecache_ts *ncp_ts;

	if (ncp == NULL)
		return;
	if ((ncp->nc_flag & NCF_DVDROP) != 0)
		vdrop(ncp->nc_dvp);
	if (__predict_false(ncp->nc_flag & NCF_TS)) {
		ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
		if (ncp->nc_nlen <= CACHE_PATH_CUTOFF)
			uma_zfree(cache_zone_small_ts, ncp_ts);
		else
			uma_zfree(cache_zone_large_ts, ncp_ts);
	} else {
		if (ncp->nc_nlen <= CACHE_PATH_CUTOFF)
			uma_zfree(cache_zone_small, ncp);
		else
			uma_zfree(cache_zone_large, ncp);
	}
}

static void
cache_out_ts(struct namecache *ncp, struct timespec *tsp, int *ticksp)
{
	struct namecache_ts *ncp_ts;

	KASSERT((ncp->nc_flag & NCF_TS) != 0 ||
	    (tsp == NULL && ticksp == NULL),
	    ("No NCF_TS"));

	if (tsp == NULL && ticksp == NULL)
		return;

	ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
	if (tsp != NULL)
		*tsp = ncp_ts->nc_time;
	if (ticksp != NULL)
		*ticksp = ncp_ts->nc_ticks;
}

#ifdef DEBUG_CACHE
static int __read_mostly	doingcache = 1;	/* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0,
    "VFS namecache enabled");
#endif

/* Export size information to userland */
SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, SYSCTL_NULL_INT_PTR,
    sizeof(struct namecache), "sizeof(struct namecache)");

/*
 * The new name cache statistics
 */
static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0,
    "Name cache statistics");
#define STATNODE_ULONG(name, descr)	\
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr);
#define STATNODE_COUNTER(name, descr)	\
	static counter_u64_t __read_mostly name; \
	SYSCTL_COUNTER_U64(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, descr);
STATNODE_ULONG(numneg, "Number of negative cache entries");
STATNODE_ULONG(numcache, "Number of cache entries");
STATNODE_COUNTER(numcachehv, "Number of namecache entries with vnodes held");
STATNODE_COUNTER(numcalls, "Number of cache lookups");
STATNODE_COUNTER(dothits, "Number of '.' hits");
STATNODE_COUNTER(dotdothits, "Number of '..' hits");
STATNODE_COUNTER(numchecks, "Number of checks in lookup");
STATNODE_COUNTER(nummiss, "Number of cache misses");
STATNODE_COUNTER(nummisszap, "Number of cache misses we do not want to cache");
STATNODE_COUNTER(numposzaps,
    "Number of cache hits (positive) we do not want to cache");
STATNODE_COUNTER(numposhits, "Number of cache hits (positive)");
STATNODE_COUNTER(numnegzaps,
    "Number of cache hits (negative) we do not want to cache");
STATNODE_COUNTER(numneghits, "Number of cache hits (negative)");
/* These count for kern___getcwd(), too. */
STATNODE_COUNTER(numfullpathcalls, "Number of fullpath search calls");
STATNODE_COUNTER(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)");
STATNODE_COUNTER(numfullpathfail2,
    "Number of fullpath search errors (VOP_VPTOCNP failures)");
STATNODE_COUNTER(numfullpathfail4, "Number of fullpath search errors (ENOMEM)");
STATNODE_COUNTER(numfullpathfound, "Number of successful fullpath calls");
STATNODE_COUNTER(zap_and_exit_bucket_relock_success,
    "Number of successful removals after relocking");
static long zap_and_exit_bucket_fail; STATNODE_ULONG(zap_and_exit_bucket_fail,
    "Number of times zap_and_exit failed to lock");
static long zap_and_exit_bucket_fail2; STATNODE_ULONG(zap_and_exit_bucket_fail2,
    "Number of times zap_and_exit failed to lock");
static long cache_lock_vnodes_cel_3_failures;
STATNODE_ULONG(cache_lock_vnodes_cel_3_failures,
    "Number of times 3-way vnode locking failed");
STATNODE_ULONG(numhotneg, "Number of hot negative entries");
STATNODE_COUNTER(numneg_evicted,
    "Number of negative entries evicted when adding a new entry");
STATNODE_COUNTER(shrinking_skipped,
    "Number of times shrinking was already in progress");

static void cache_zap_locked(struct namecache *ncp, bool neg_locked);
static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
    char *buf, char **retbuf, u_int buflen);

static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

static int cache_yield;
SYSCTL_INT(_vfs_cache, OID_AUTO, yield, CTLFLAG_RD, &cache_yield, 0,
    "Number of times cache called yield");

static void __noinline
cache_maybe_yield(void)
{

	if (should_yield()) {
		cache_yield++;
		kern_yield(PRI_USER);
	}
}

static inline void
cache_assert_vlp_locked(struct mtx *vlp)
{

	if (vlp != NULL)
		mtx_assert(vlp, MA_OWNED);
}

static inline void
cache_assert_vnode_locked(struct vnode *vp)
{
	struct mtx *vlp;

	vlp = VP2VNODELOCK(vp);
	cache_assert_vlp_locked(vlp);
}

static uint32_t
cache_get_hash(char *name, u_char len, struct vnode *dvp)
{
	uint32_t hash;

	hash = fnv_32_buf(name, len, FNV1_32_INIT);
	hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
	return (hash);
}

static inline struct rwlock *
NCP2BUCKETLOCK(struct namecache *ncp)
{
	uint32_t hash;

	hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, ncp->nc_dvp);
	return (HASH2BUCKETLOCK(hash));
}

#ifdef INVARIANTS
static void
cache_assert_bucket_locked(struct namecache *ncp, int mode)
{
	struct rwlock *blp;

	blp = NCP2BUCKETLOCK(ncp);
	rw_assert(blp, mode);
}
#else
#define cache_assert_bucket_locked(x, y) do { } while (0)
#endif

#define	cache_sort_vnodes(x, y)	_cache_sort_vnodes((void **)(x), (void **)(y))
static void
_cache_sort_vnodes(void **p1, void **p2)
{
	void *tmp;

	MPASS(*p1 != NULL || *p2 != NULL);

	if (*p1 > *p2) {
		tmp = *p2;
		*p2 = *p1;
		*p1 = tmp;
	}
}

static void
cache_lock_all_buckets(void)
{
	u_int i;

	for (i = 0; i < numbucketlocks; i++)
		rw_wlock(&bucketlocks[i]);
}

static void
cache_unlock_all_buckets(void)
{
	u_int i;

	for (i = 0; i < numbucketlocks; i++)
		rw_wunlock(&bucketlocks[i]);
}

static void
cache_lock_all_vnodes(void)
{
	u_int i;

	for (i = 0; i < numvnodelocks; i++)
		mtx_lock(&vnodelocks[i]);
}

static void
cache_unlock_all_vnodes(void)
{
	u_int i;

	for (i = 0; i < numvnodelocks; i++)
		mtx_unlock(&vnodelocks[i]);
}

static int
cache_trylock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
{

	cache_sort_vnodes(&vlp1, &vlp2);

	if (vlp1 != NULL) {
		if (!mtx_trylock(vlp1))
			return (EAGAIN);
	}
	if (!mtx_trylock(vlp2)) {
		if (vlp1 != NULL)
			mtx_unlock(vlp1);
		return (EAGAIN);
	}

	return (0);
}

static void
cache_lock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
{

	MPASS(vlp1 != NULL || vlp2 != NULL);
	MPASS(vlp1 <= vlp2);

	if (vlp1 != NULL)
		mtx_lock(vlp1);
	if (vlp2 != NULL)
		mtx_lock(vlp2);
}

static void
cache_unlock_vnodes(struct mtx *vlp1, struct mtx *vlp2)
{

	MPASS(vlp1 != NULL || vlp2 != NULL);

	if (vlp1 != NULL)
		mtx_unlock(vlp1);
	if (vlp2 != NULL)
		mtx_unlock(vlp2);
}

static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct nchstats snap;

	if (req->oldptr == NULL)
		return (SYSCTL_OUT(req, 0, sizeof(snap)));

	snap = nchstats;
	snap.ncs_goodhits = counter_u64_fetch(numposhits);
	snap.ncs_neghits = counter_u64_fetch(numneghits);
	snap.ncs_badhits = counter_u64_fetch(numposzaps) +
	    counter_u64_fetch(numnegzaps);
	snap.ncs_miss = counter_u64_fetch(nummisszap) +
	    counter_u64_fetch(nummiss);

	return (SYSCTL_OUT(req, &snap, sizeof(snap)));
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE | CTLFLAG_RD |
    CTLFLAG_MPSAFE, 0, 0, sysctl_nchstats, "LU",
    "VFS cache effectiveness statistics");

#ifdef DIAGNOSTIC
/*
 * Grab an atomic snapshot of the name cache hash chain lengths
 */
static SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL,
    "hash table stats");

static int
sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int i, error, n_nchash, *cntbuf;

retry:
	n_nchash = nchash + 1;	/* nchash is max index, not count */
	if (req->oldptr == NULL)
		return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));
	cntbuf = malloc(n_nchash * sizeof(int), M_TEMP, M_ZERO | M_WAITOK);
	cache_lock_all_buckets();
	if (n_nchash != nchash + 1) {
		cache_unlock_all_buckets();
		free(cntbuf, M_TEMP);
		goto retry;
	}
	/* Scan hash tables counting entries */
	for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++)
		LIST_FOREACH(ncp, ncpp, nc_hash)
			cntbuf[i]++;
	cache_unlock_all_buckets();
	for (error = 0, i = 0; i < n_nchash; i++)
		if ((error = SYSCTL_OUT(req, &cntbuf[i], sizeof(int))) != 0)
			break;
	free(cntbuf, M_TEMP);
	return (error);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD|
    CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int",
    "nchash chain lengths");

static int
sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int n_nchash;
	int count, maxlength, used, pct;

	if (!req->oldptr)
		return SYSCTL_OUT(req, 0, 4 * sizeof(int));

	cache_lock_all_buckets();
	n_nchash = nchash + 1;	/* nchash is max index, not count */
	used = 0;
	maxlength = 0;

	/* Scan hash tables for applicable entries */
	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
		count = 0;
		LIST_FOREACH(ncp, ncpp, nc_hash) {
			count++;
		}
		if (count)
			used++;
		if (maxlength < count)
			maxlength = count;
	}
	n_nchash = nchash + 1;
	cache_unlock_all_buckets();
	pct = (used * 100) / (n_nchash / 100);
	error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &used, sizeof(used));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &pct, sizeof(pct));
	if (error)
		return (error);
	return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD|
    CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I",
    "nchash statistics (number of total/used buckets, maximum chain length, usage percentage)");
#endif

/*
 * Negative entries management
 *
 * A variation of the LRU scheme is used.  New entries are hashed into one of
 * numneglists cold lists.  Entries get promoted to the hot list on first hit.
 *
 * The shrinker will demote the hot list head and evict from the cold list in a
 * round-robin manner.
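 *
 * An eviction pass (cache_negative_zap_one()) is triggered from
 * cache_enter_time() once the number of negative entries exceeds the total
 * entry count divided by ncnegfactor.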
 */
static void
cache_negative_hit(struct namecache *ncp)
{
	struct neglist *neglist;

	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	if (ncp->nc_flag & NCF_HOTNEGATIVE)
		return;
	neglist = NCP2NEGLIST(ncp);
	mtx_lock(&ncneg_hot.nl_lock);
	mtx_lock(&neglist->nl_lock);
	if (!(ncp->nc_flag & NCF_HOTNEGATIVE)) {
		numhotneg++;
		TAILQ_REMOVE(&neglist->nl_list, ncp, nc_dst);
		TAILQ_INSERT_TAIL(&ncneg_hot.nl_list, ncp, nc_dst);
		ncp->nc_flag |= NCF_HOTNEGATIVE;
	}
	mtx_unlock(&neglist->nl_lock);
	mtx_unlock(&ncneg_hot.nl_lock);
}

static void
cache_negative_insert(struct namecache *ncp, bool neg_locked)
{
	struct neglist *neglist;

	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	cache_assert_bucket_locked(ncp, RA_WLOCKED);
	neglist = NCP2NEGLIST(ncp);
	if (!neg_locked) {
		mtx_lock(&neglist->nl_lock);
	} else {
		mtx_assert(&neglist->nl_lock, MA_OWNED);
	}
	TAILQ_INSERT_TAIL(&neglist->nl_list, ncp, nc_dst);
	if (!neg_locked)
		mtx_unlock(&neglist->nl_lock);
	atomic_add_rel_long(&numneg, 1);
}

static void
cache_negative_remove(struct namecache *ncp, bool neg_locked)
{
	struct neglist *neglist;
	bool hot_locked = false;
	bool list_locked = false;

	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	cache_assert_bucket_locked(ncp, RA_WLOCKED);
	neglist = NCP2NEGLIST(ncp);
	if (!neg_locked) {
		if (ncp->nc_flag & NCF_HOTNEGATIVE) {
			hot_locked = true;
			mtx_lock(&ncneg_hot.nl_lock);
			if (!(ncp->nc_flag & NCF_HOTNEGATIVE)) {
				list_locked = true;
				mtx_lock(&neglist->nl_lock);
			}
		} else {
			list_locked = true;
			mtx_lock(&neglist->nl_lock);
		}
	}
	if (ncp->nc_flag & NCF_HOTNEGATIVE) {
		mtx_assert(&ncneg_hot.nl_lock, MA_OWNED);
		TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst);
		numhotneg--;
	} else {
		mtx_assert(&neglist->nl_lock, MA_OWNED);
		TAILQ_REMOVE(&neglist->nl_list, ncp, nc_dst);
	}
	if (list_locked)
		mtx_unlock(&neglist->nl_lock);
	if (hot_locked)
		mtx_unlock(&ncneg_hot.nl_lock);
	atomic_subtract_rel_long(&numneg, 1);
}

static void
cache_negative_shrink_select(int start, struct namecache **ncpp,
    struct neglist **neglistpp)
{
	struct neglist *neglist;
	struct namecache *ncp;
	int i;

	*ncpp = ncp = NULL;
	neglist = NULL;

	for (i = start; i < numneglists; i++) {
		neglist = &neglists[i];
		if (TAILQ_FIRST(&neglist->nl_list) == NULL)
			continue;
		mtx_lock(&neglist->nl_lock);
		ncp = TAILQ_FIRST(&neglist->nl_list);
		if (ncp != NULL)
			break;
		mtx_unlock(&neglist->nl_lock);
	}

	*neglistpp = neglist;
	*ncpp = ncp;
}

static void
cache_negative_zap_one(void)
{
	struct namecache *ncp, *ncp2;
	struct neglist *neglist;
	struct mtx *dvlp;
	struct rwlock *blp;

	if (mtx_owner(&ncneg_shrink_lock) != NULL ||
	    !mtx_trylock(&ncneg_shrink_lock)) {
		counter_u64_add(shrinking_skipped, 1);
		return;
	}

	mtx_lock(&ncneg_hot.nl_lock);
	ncp = TAILQ_FIRST(&ncneg_hot.nl_list);
	if (ncp != NULL) {
		neglist = NCP2NEGLIST(ncp);
		mtx_lock(&neglist->nl_lock);
		TAILQ_REMOVE(&ncneg_hot.nl_list, ncp, nc_dst);
		TAILQ_INSERT_TAIL(&neglist->nl_list, ncp, nc_dst);
		ncp->nc_flag &= ~NCF_HOTNEGATIVE;
		numhotneg--;
		mtx_unlock(&neglist->nl_lock);
	}
	mtx_unlock(&ncneg_hot.nl_lock);

	cache_negative_shrink_select(shrink_list_turn, &ncp, &neglist);
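	/*
	 * Advance the round-robin cursor and, if nothing was found and the
	 * cursor wrapped, scan once more from the beginning.
	 */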
	shrink_list_turn++;
	if (shrink_list_turn == numneglists)
		shrink_list_turn = 0;
	if (ncp == NULL && shrink_list_turn == 0)
		cache_negative_shrink_select(shrink_list_turn, &ncp, &neglist);
	mtx_unlock(&ncneg_shrink_lock);
	if (ncp == NULL)
		return;

	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	blp = NCP2BUCKETLOCK(ncp);
	mtx_unlock(&neglist->nl_lock);
	mtx_lock(dvlp);
	rw_wlock(blp);
	mtx_lock(&neglist->nl_lock);
	ncp2 = TAILQ_FIRST(&neglist->nl_list);
	if (ncp != ncp2 || dvlp != VP2VNODELOCK(ncp2->nc_dvp) ||
	    blp != NCP2BUCKETLOCK(ncp2) || !(ncp2->nc_flag & NCF_NEGATIVE)) {
		ncp = NULL;
	} else {
		SDT_PROBE2(vfs, namecache, shrink_negative, done, ncp->nc_dvp,
		    ncp->nc_name);

		cache_zap_locked(ncp, true);
		counter_u64_add(numneg_evicted, 1);
	}
	mtx_unlock(&neglist->nl_lock);
	rw_wunlock(blp);
	mtx_unlock(dvlp);
	cache_free(ncp);
}

/*
 * cache_zap_locked():
 *
 *   Removes a namecache entry from cache, whether it contains an actual
 *   pointer to a vnode or if it is just a negative cache entry.
 */
static void
cache_zap_locked(struct namecache *ncp, bool neg_locked)
{

	if (!(ncp->nc_flag & NCF_NEGATIVE))
		cache_assert_vnode_locked(ncp->nc_vp);
	cache_assert_vnode_locked(ncp->nc_dvp);
	cache_assert_bucket_locked(ncp, RA_WLOCKED);

	CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp,
	    (ncp->nc_flag & NCF_NEGATIVE) ? NULL : ncp->nc_vp);
	LIST_REMOVE(ncp, nc_hash);
	if (!(ncp->nc_flag & NCF_NEGATIVE)) {
		SDT_PROBE3(vfs, namecache, zap, done, ncp->nc_dvp,
		    ncp->nc_name, ncp->nc_vp);
		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
		if (ncp == ncp->nc_vp->v_cache_dd)
			ncp->nc_vp->v_cache_dd = NULL;
	} else {
		SDT_PROBE2(vfs, namecache, zap_negative, done, ncp->nc_dvp,
		    ncp->nc_name);
		cache_negative_remove(ncp, neg_locked);
	}
	if (ncp->nc_flag & NCF_ISDOTDOT) {
		if (ncp == ncp->nc_dvp->v_cache_dd)
			ncp->nc_dvp->v_cache_dd = NULL;
	} else {
		LIST_REMOVE(ncp, nc_src);
		if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
			ncp->nc_flag |= NCF_DVDROP;
			counter_u64_add(numcachehv, -1);
		}
	}
	atomic_subtract_rel_long(&numcache, 1);
}

static void
cache_zap_negative_locked_vnode_kl(struct namecache *ncp, struct vnode *vp)
{
	struct rwlock *blp;

	MPASS(ncp->nc_dvp == vp);
	MPASS(ncp->nc_flag & NCF_NEGATIVE);
	cache_assert_vnode_locked(vp);

	blp = NCP2BUCKETLOCK(ncp);
	rw_wlock(blp);
	cache_zap_locked(ncp, false);
	rw_wunlock(blp);
}

static bool
cache_zap_locked_vnode_kl2(struct namecache *ncp, struct vnode *vp,
    struct mtx **vlpp)
{
	struct mtx *pvlp, *vlp1, *vlp2, *to_unlock;
	struct rwlock *blp;

	MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp);
	cache_assert_vnode_locked(vp);

	if (ncp->nc_flag & NCF_NEGATIVE) {
		if (*vlpp != NULL) {
			mtx_unlock(*vlpp);
			*vlpp = NULL;
		}
		cache_zap_negative_locked_vnode_kl(ncp, vp);
		return (true);
	}

	pvlp = VP2VNODELOCK(vp);
	blp = NCP2BUCKETLOCK(ncp);
	vlp1 = VP2VNODELOCK(ncp->nc_dvp);
	vlp2 = VP2VNODELOCK(ncp->nc_vp);

	if (*vlpp == vlp1 || *vlpp == vlp2) {
		to_unlock = *vlpp;
		*vlpp = NULL;
	} else {
		if (*vlpp != NULL) {
			mtx_unlock(*vlpp);
			*vlpp = NULL;
		}
		cache_sort_vnodes(&vlp1, &vlp2);
		if (vlp1 == pvlp) {
			mtx_lock(vlp2);
			to_unlock = vlp2;
		} else {
			if (!mtx_trylock(vlp1))
				goto out_relock;
			to_unlock = vlp1;
		}
	}
	rw_wlock(blp);
	cache_zap_locked(ncp, false);
	rw_wunlock(blp);
	if (to_unlock != NULL)
		mtx_unlock(to_unlock);
	return (true);

out_relock:
	mtx_unlock(vlp2);
	mtx_lock(vlp1);
	mtx_lock(vlp2);
	MPASS(*vlpp == NULL);
	*vlpp = vlp1;
	return (false);
}

static int __noinline
cache_zap_locked_vnode(struct namecache *ncp, struct vnode *vp)
{
	struct mtx *pvlp, *vlp1, *vlp2, *to_unlock;
	struct rwlock *blp;
	int error = 0;

	MPASS(vp == ncp->nc_dvp || vp == ncp->nc_vp);
	cache_assert_vnode_locked(vp);

	pvlp = VP2VNODELOCK(vp);
	if (ncp->nc_flag & NCF_NEGATIVE) {
		cache_zap_negative_locked_vnode_kl(ncp, vp);
		goto out;
	}

	blp = NCP2BUCKETLOCK(ncp);
	vlp1 = VP2VNODELOCK(ncp->nc_dvp);
	vlp2 = VP2VNODELOCK(ncp->nc_vp);
	cache_sort_vnodes(&vlp1, &vlp2);
	if (vlp1 == pvlp) {
		mtx_lock(vlp2);
		to_unlock = vlp2;
	} else {
		if (!mtx_trylock(vlp1)) {
			error = EAGAIN;
			goto out;
		}
		to_unlock = vlp1;
	}
	rw_wlock(blp);
	cache_zap_locked(ncp, false);
	rw_wunlock(blp);
	mtx_unlock(to_unlock);
out:
	mtx_unlock(pvlp);
	return (error);
}

/*
 * If trylocking failed we can get here. We know enough to take all needed locks
 * in the right order and re-lookup the entry.
 */
static int
cache_zap_unlocked_bucket(struct namecache *ncp, struct componentname *cnp,
    struct vnode *dvp, struct mtx *dvlp, struct mtx *vlp, uint32_t hash,
    struct rwlock *blp)
{
	struct namecache *rncp;

	cache_assert_bucket_locked(ncp, RA_UNLOCKED);

	cache_sort_vnodes(&dvlp, &vlp);
	cache_lock_vnodes(dvlp, vlp);
	rw_wlock(blp);
	LIST_FOREACH(rncp, (NCHHASH(hash)), nc_hash) {
		if (rncp == ncp && rncp->nc_dvp == dvp &&
		    rncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(rncp->nc_name, cnp->cn_nameptr, rncp->nc_nlen))
			break;
	}
	if (rncp != NULL) {
		cache_zap_locked(rncp, false);
		rw_wunlock(blp);
		cache_unlock_vnodes(dvlp, vlp);
		counter_u64_add(zap_and_exit_bucket_relock_success, 1);
		return (0);
	}

	rw_wunlock(blp);
	cache_unlock_vnodes(dvlp, vlp);
	return (EAGAIN);
}

static int __noinline
cache_zap_wlocked_bucket(struct namecache *ncp, struct componentname *cnp,
    uint32_t hash, struct rwlock *blp)
{
	struct mtx *dvlp, *vlp;
	struct vnode *dvp;

	cache_assert_bucket_locked(ncp, RA_WLOCKED);

	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	vlp = NULL;
	if (!(ncp->nc_flag & NCF_NEGATIVE))
		vlp = VP2VNODELOCK(ncp->nc_vp);
	if (cache_trylock_vnodes(dvlp, vlp) == 0) {
		cache_zap_locked(ncp, false);
		rw_wunlock(blp);
		cache_unlock_vnodes(dvlp, vlp);
		return (0);
	}

	dvp = ncp->nc_dvp;
	rw_wunlock(blp);
	return (cache_zap_unlocked_bucket(ncp, cnp, dvp, dvlp, vlp, hash, blp));
}

static int __noinline
cache_zap_rlocked_bucket(struct namecache *ncp, struct componentname *cnp,
    uint32_t hash, struct rwlock *blp)
{
	struct mtx *dvlp, *vlp;
	struct vnode *dvp;

	cache_assert_bucket_locked(ncp, RA_RLOCKED);

	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	vlp = NULL;
	if (!(ncp->nc_flag & NCF_NEGATIVE))
		vlp = VP2VNODELOCK(ncp->nc_vp);
	if (cache_trylock_vnodes(dvlp, vlp) == 0) {
		rw_runlock(blp);
		rw_wlock(blp);
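		/*
		 * The bucket lock is re-acquired exclusively; the vnode
		 * locks taken above keep the entry from being zapped by
		 * another thread in the meantime.
		 */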
		cache_zap_locked(ncp, false);
		rw_wunlock(blp);
		cache_unlock_vnodes(dvlp, vlp);
		return (0);
	}

	dvp = ncp->nc_dvp;
	rw_runlock(blp);
	return (cache_zap_unlocked_bucket(ncp, cnp, dvp, dvlp, vlp, hash, blp));
}

static int
cache_zap_wlocked_bucket_kl(struct namecache *ncp, struct rwlock *blp,
    struct mtx **vlpp1, struct mtx **vlpp2)
{
	struct mtx *dvlp, *vlp;

	cache_assert_bucket_locked(ncp, RA_WLOCKED);

	dvlp = VP2VNODELOCK(ncp->nc_dvp);
	vlp = NULL;
	if (!(ncp->nc_flag & NCF_NEGATIVE))
		vlp = VP2VNODELOCK(ncp->nc_vp);
	cache_sort_vnodes(&dvlp, &vlp);

	if (*vlpp1 == dvlp && *vlpp2 == vlp) {
		cache_zap_locked(ncp, false);
		cache_unlock_vnodes(dvlp, vlp);
		*vlpp1 = NULL;
		*vlpp2 = NULL;
		return (0);
	}

	if (*vlpp1 != NULL)
		mtx_unlock(*vlpp1);
	if (*vlpp2 != NULL)
		mtx_unlock(*vlpp2);
	*vlpp1 = NULL;
	*vlpp2 = NULL;

	if (cache_trylock_vnodes(dvlp, vlp) == 0) {
		cache_zap_locked(ncp, false);
		cache_unlock_vnodes(dvlp, vlp);
		return (0);
	}

	rw_wunlock(blp);
	*vlpp1 = dvlp;
	*vlpp2 = vlp;
	if (*vlpp1 != NULL)
		mtx_lock(*vlpp1);
	mtx_lock(*vlpp2);
	rw_wlock(blp);
	return (EAGAIN);
}

static void
cache_lookup_unlock(struct rwlock *blp, struct mtx *vlp)
{

	if (blp != NULL) {
		rw_runlock(blp);
	} else {
		mtx_unlock(vlp);
	}
}

static int __noinline
cache_lookup_dot(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
    struct timespec *tsp, int *ticksp)
{
	int ltype;

	*vpp = dvp;
	CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
	    dvp, cnp->cn_nameptr);
	counter_u64_add(dothits, 1);
	SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", *vpp);
	if (tsp != NULL)
		timespecclear(tsp);
	if (ticksp != NULL)
		*ticksp = ticks;
	vrefact(*vpp);
	/*
	 * When we lookup "." we still can be asked to lock it
	 * differently...
	 */
	ltype = cnp->cn_lkflags & LK_TYPE_MASK;
	if (ltype != VOP_ISLOCKED(*vpp)) {
		if (ltype == LK_EXCLUSIVE) {
			vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
			if (VN_IS_DOOMED((*vpp))) {
				/* forced unmount */
				vrele(*vpp);
				*vpp = NULL;
				return (ENOENT);
			}
		} else
			vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
	}
	return (-1);
}

static __noinline int
cache_lookup_nomakeentry(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp, struct timespec *tsp, int *ticksp)
{
	struct namecache *ncp;
	struct rwlock *blp;
	struct mtx *dvlp, *dvlp2;
	uint32_t hash;
	int error;

	if (cnp->cn_namelen == 2 &&
	    cnp->cn_nameptr[0] == '.' && cnp->cn_nameptr[1] == '.') {
		counter_u64_add(dotdothits, 1);
		dvlp = VP2VNODELOCK(dvp);
		dvlp2 = NULL;
		mtx_lock(dvlp);
retry_dotdot:
		ncp = dvp->v_cache_dd;
		if (ncp == NULL) {
			SDT_PROBE3(vfs, namecache, lookup, miss, dvp,
			    "..", NULL);
			mtx_unlock(dvlp);
			if (dvlp2 != NULL)
				mtx_unlock(dvlp2);
			return (0);
		}
		if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
			if (ncp->nc_dvp != dvp)
				panic("dvp %p v_cache_dd %p\n", dvp, ncp);
			if (!cache_zap_locked_vnode_kl2(ncp,
			    dvp, &dvlp2))
				goto retry_dotdot;
			MPASS(dvp->v_cache_dd == NULL);
			mtx_unlock(dvlp);
			if (dvlp2 != NULL)
				mtx_unlock(dvlp2);
			cache_free(ncp);
		} else {
			dvp->v_cache_dd = NULL;
			mtx_unlock(dvlp);
			if (dvlp2 != NULL)
				mtx_unlock(dvlp2);
		}
		return (0);
	}

	hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
	blp = HASH2BUCKETLOCK(hash);
retry:
	if (LIST_EMPTY(NCHHASH(hash)))
		goto out_no_entry;

	rw_wlock(blp);

	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		counter_u64_add(numchecks, 1);
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (ncp == NULL) {
		rw_wunlock(blp);
		goto out_no_entry;
	}

	error = cache_zap_wlocked_bucket(ncp, cnp, hash, blp);
	if (__predict_false(error != 0)) {
		zap_and_exit_bucket_fail++;
		cache_maybe_yield();
		goto retry;
	}
	counter_u64_add(numposzaps, 1);
	cache_free(ncp);
	return (0);
out_no_entry:
	SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr, NULL);
	counter_u64_add(nummisszap, 1);
	return (0);
}

/**
 * Lookup a name in the name cache
 *
 * # Arguments
 *
 * - dvp:	Parent directory in which to search.
 * - vpp:	Return argument.  Will contain desired vnode on cache hit.
 * - cnp:	Parameters of the name search.  The most interesting bits of
 *		the cn_flags field have the following meanings:
 *	- MAKEENTRY:	If clear, free an entry from the cache rather than look
 *			it up.
 *	- ISDOTDOT:	Must be set if and only if cn_nameptr == ".."
 * - tsp:	Return storage for cache timestamp.  On a successful (positive
 *		or negative) lookup, tsp will be filled with any timespec that
 *		was stored when this cache entry was created.  However, it will
 *		be clear for "." entries.
 * - ticks:	Return storage for alternate cache timestamp.  On a successful
 *		(positive or negative) lookup, it will contain the ticks value
 *		that was current when the cache entry was created, unless cnp
 *		was ".".
 *
 * # Returns
 *
 * - -1:	A positive cache hit.  vpp will contain the desired vnode.
 * - ENOENT:	A negative cache hit, or dvp was recycled out from under us due
 *		to a forced unmount.  vpp will not be modified.  If the entry
 *		is a whiteout, then the ISWHITEOUT flag will be set in
 *		cnp->cn_flags.
 * - 0:		A cache miss.  vpp will not be modified.
 *
 * # Locking
 *
 * On a cache hit, vpp will be returned locked and ref'd.  If we're looking up
 * .., dvp is unlocked.  If we're looking up . an extra ref is taken, but the
 * lock is not recursively acquired.
 */
int
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
    struct timespec *tsp, int *ticksp)
{
	struct namecache_ts *ncp_ts;
	struct namecache *ncp;
	struct rwlock *blp;
	struct mtx *dvlp;
	uint32_t hash;
	enum vgetstate vs;
	int error, ltype;

#ifdef DEBUG_CACHE
	if (__predict_false(!doingcache)) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (0);
	}
#endif

	counter_u64_add(numcalls, 1);

	if (__predict_false(cnp->cn_namelen == 1 && cnp->cn_nameptr[0] == '.'))
		return (cache_lookup_dot(dvp, vpp, cnp, tsp, ticksp));

	if ((cnp->cn_flags & MAKEENTRY) == 0)
		return (cache_lookup_nomakeentry(dvp, vpp, cnp, tsp, ticksp));

retry:
	blp = NULL;
	dvlp = NULL;
	error = 0;
	if (cnp->cn_namelen == 2 &&
	    cnp->cn_nameptr[0] == '.' && cnp->cn_nameptr[1] == '.') {
		counter_u64_add(dotdothits, 1);
		dvlp = VP2VNODELOCK(dvp);
		mtx_lock(dvlp);
		ncp = dvp->v_cache_dd;
		if (ncp == NULL) {
			SDT_PROBE3(vfs, namecache, lookup, miss, dvp,
			    "..", NULL);
			mtx_unlock(dvlp);
			return (0);
		}
		if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
			if (ncp->nc_flag & NCF_NEGATIVE)
				*vpp = NULL;
			else
				*vpp = ncp->nc_vp;
		} else
			*vpp = ncp->nc_dvp;
		/* Return failure if negative entry was found. */
		if (*vpp == NULL)
			goto negative_success;
		CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
		    dvp, cnp->cn_nameptr, *vpp);
		SDT_PROBE3(vfs, namecache, lookup, hit, dvp, "..",
		    *vpp);
		cache_out_ts(ncp, tsp, ticksp);
		if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) ==
		    NCF_DTS && tsp != NULL) {
			ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
			*tsp = ncp_ts->nc_dotdottime;
		}
		goto success;
	}

	hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
	blp = HASH2BUCKETLOCK(hash);
	rw_rlock(blp);

	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		counter_u64_add(numchecks, 1);
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (__predict_false(ncp == NULL)) {
		rw_runlock(blp);
		SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr,
		    NULL);
		counter_u64_add(nummiss, 1);
		return (0);
	}

	if (ncp->nc_flag & NCF_NEGATIVE)
		goto negative_success;

	/* We found a "positive" match, return the vnode */
	counter_u64_add(numposhits, 1);
	*vpp = ncp->nc_vp;
	CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
	    dvp, cnp->cn_nameptr, *vpp, ncp);
	SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name,
	    *vpp);
	cache_out_ts(ncp, tsp, ticksp);
success:
	/*
	 * On success we return a locked and ref'd vnode as per the lookup
	 * protocol.
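	 *
	 * The vnode is referenced with vget_prep() while the namecache lock
	 * is still held and only locked with vget_finish() after that lock
	 * has been dropped, so we never sleep on a vnode lock with cache
	 * locks held.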
	 */
	MPASS(dvp != *vpp);
	ltype = 0;	/* silence gcc warning */
	if (cnp->cn_flags & ISDOTDOT) {
		ltype = VOP_ISLOCKED(dvp);
		VOP_UNLOCK(dvp);
	}
	vs = vget_prep(*vpp);
	cache_lookup_unlock(blp, dvlp);
	error = vget_finish(*vpp, cnp->cn_lkflags, vs);
	if (cnp->cn_flags & ISDOTDOT) {
		vn_lock(dvp, ltype | LK_RETRY);
		if (VN_IS_DOOMED(dvp)) {
			if (error == 0)
				vput(*vpp);
			*vpp = NULL;
			return (ENOENT);
		}
	}
	if (error) {
		*vpp = NULL;
		goto retry;
	}
	if ((cnp->cn_flags & ISLASTCN) &&
	    (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
		ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
	}
	return (-1);

negative_success:
	/* We found a negative match, and want to create it, so purge */
	if (cnp->cn_nameiop == CREATE) {
		counter_u64_add(numnegzaps, 1);
		goto zap_and_exit;
	}

	counter_u64_add(numneghits, 1);
	cache_negative_hit(ncp);
	if (ncp->nc_flag & NCF_WHITE)
		cnp->cn_flags |= ISWHITEOUT;
	SDT_PROBE2(vfs, namecache, lookup, hit__negative, dvp,
	    ncp->nc_name);
	cache_out_ts(ncp, tsp, ticksp);
	cache_lookup_unlock(blp, dvlp);
	return (ENOENT);

zap_and_exit:
	if (blp != NULL)
		error = cache_zap_rlocked_bucket(ncp, cnp, hash, blp);
	else
		error = cache_zap_locked_vnode(ncp, dvp);
	if (__predict_false(error != 0)) {
		zap_and_exit_bucket_fail2++;
		cache_maybe_yield();
		goto retry;
	}
	cache_free(ncp);
	return (0);
}

struct celockstate {
	struct mtx *vlp[3];
	struct rwlock *blp[2];
};
CTASSERT((nitems(((struct celockstate *)0)->vlp) == 3));
CTASSERT((nitems(((struct celockstate *)0)->blp) == 2));

static inline void
cache_celockstate_init(struct celockstate *cel)
{

	bzero(cel, sizeof(*cel));
}

static void
cache_lock_vnodes_cel(struct celockstate *cel, struct vnode *vp,
    struct vnode *dvp)
{
	struct mtx *vlp1, *vlp2;

	MPASS(cel->vlp[0] == NULL);
	MPASS(cel->vlp[1] == NULL);
	MPASS(cel->vlp[2] == NULL);

	MPASS(vp != NULL || dvp != NULL);

	vlp1 = VP2VNODELOCK(vp);
	vlp2 = VP2VNODELOCK(dvp);
	cache_sort_vnodes(&vlp1, &vlp2);

	if (vlp1 != NULL) {
		mtx_lock(vlp1);
		cel->vlp[0] = vlp1;
	}
	mtx_lock(vlp2);
	cel->vlp[1] = vlp2;
}

static void
cache_unlock_vnodes_cel(struct celockstate *cel)
{

	MPASS(cel->vlp[0] != NULL || cel->vlp[1] != NULL);

	if (cel->vlp[0] != NULL)
		mtx_unlock(cel->vlp[0]);
	if (cel->vlp[1] != NULL)
		mtx_unlock(cel->vlp[1]);
	if (cel->vlp[2] != NULL)
		mtx_unlock(cel->vlp[2]);
}

static bool
cache_lock_vnodes_cel_3(struct celockstate *cel, struct vnode *vp)
{
	struct mtx *vlp;
	bool ret;

	cache_assert_vlp_locked(cel->vlp[0]);
	cache_assert_vlp_locked(cel->vlp[1]);
	MPASS(cel->vlp[2] == NULL);

	MPASS(vp != NULL);
	vlp = VP2VNODELOCK(vp);

	ret = true;
	if (vlp >= cel->vlp[1]) {
		mtx_lock(vlp);
	} else {
		if (mtx_trylock(vlp))
			goto out;
		cache_lock_vnodes_cel_3_failures++;
		cache_unlock_vnodes_cel(cel);
		if (vlp < cel->vlp[0]) {
			mtx_lock(vlp);
			mtx_lock(cel->vlp[0]);
			mtx_lock(cel->vlp[1]);
		} else {
			if (cel->vlp[0] != NULL)
				mtx_lock(cel->vlp[0]);
			mtx_lock(vlp);
			mtx_lock(cel->vlp[1]);
		}
		ret = false;
	}
out:
	cel->vlp[2] = vlp;
	return (ret);
}

static void
cache_lock_buckets_cel(struct celockstate *cel, struct rwlock *blp1,
    struct rwlock *blp2)
{

	MPASS(cel->blp[0] == NULL);
	MPASS(cel->blp[1] == NULL);

	cache_sort_vnodes(&blp1, &blp2);

	if (blp1 != NULL) {
		rw_wlock(blp1);
		cel->blp[0] = blp1;
	}
	rw_wlock(blp2);
	cel->blp[1] = blp2;
}

static void
cache_unlock_buckets_cel(struct celockstate *cel)
{

	if (cel->blp[0] != NULL)
		rw_wunlock(cel->blp[0]);
	rw_wunlock(cel->blp[1]);
}

/*
 * Lock part of the cache affected by the insertion.
 *
 * This means vnodelocks for dvp, vp and the relevant bucketlock.
 * However, insertion can result in removal of an old entry. In this
 * case we have an additional vnode and bucketlock pair to lock. If the
 * entry is negative, ncelock is locked instead of the vnode.
 *
 * That is, in the worst case we have to lock 3 vnodes and 2 bucketlocks, while
 * preserving the locking order (smaller address first).
 */
static void
cache_enter_lock(struct celockstate *cel, struct vnode *dvp, struct vnode *vp,
    uint32_t hash)
{
	struct namecache *ncp;
	struct rwlock *blps[2];

	blps[0] = HASH2BUCKETLOCK(hash);
	for (;;) {
		blps[1] = NULL;
		cache_lock_vnodes_cel(cel, dvp, vp);
		if (vp == NULL || vp->v_type != VDIR)
			break;
		ncp = vp->v_cache_dd;
		if (ncp == NULL)
			break;
		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
			break;
		MPASS(ncp->nc_dvp == vp);
		blps[1] = NCP2BUCKETLOCK(ncp);
		if (ncp->nc_flag & NCF_NEGATIVE)
			break;
		if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp))
			break;
		/*
		 * All vnodes got re-locked. Re-validate the state and if
		 * nothing changed we are done. Otherwise restart.
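		 * "Nothing changed" means the ".." entry, its bucket lock and
		 * the vnode lock of its target all still match what was
		 * computed before the locks were dropped.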
		 */
		if (ncp == vp->v_cache_dd &&
		    (ncp->nc_flag & NCF_ISDOTDOT) != 0 &&
		    blps[1] == NCP2BUCKETLOCK(ncp) &&
		    VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2])
			break;
		cache_unlock_vnodes_cel(cel);
		cel->vlp[0] = NULL;
		cel->vlp[1] = NULL;
		cel->vlp[2] = NULL;
	}
	cache_lock_buckets_cel(cel, blps[0], blps[1]);
}

static void
cache_enter_lock_dd(struct celockstate *cel, struct vnode *dvp, struct vnode *vp,
    uint32_t hash)
{
	struct namecache *ncp;
	struct rwlock *blps[2];

	blps[0] = HASH2BUCKETLOCK(hash);
	for (;;) {
		blps[1] = NULL;
		cache_lock_vnodes_cel(cel, dvp, vp);
		ncp = dvp->v_cache_dd;
		if (ncp == NULL)
			break;
		if ((ncp->nc_flag & NCF_ISDOTDOT) == 0)
			break;
		MPASS(ncp->nc_dvp == dvp);
		blps[1] = NCP2BUCKETLOCK(ncp);
		if (ncp->nc_flag & NCF_NEGATIVE)
			break;
		if (cache_lock_vnodes_cel_3(cel, ncp->nc_vp))
			break;
		if (ncp == dvp->v_cache_dd &&
		    (ncp->nc_flag & NCF_ISDOTDOT) != 0 &&
		    blps[1] == NCP2BUCKETLOCK(ncp) &&
		    VP2VNODELOCK(ncp->nc_vp) == cel->vlp[2])
			break;
		cache_unlock_vnodes_cel(cel);
		cel->vlp[0] = NULL;
		cel->vlp[1] = NULL;
		cel->vlp[2] = NULL;
	}
	cache_lock_buckets_cel(cel, blps[0], blps[1]);
}

static void
cache_enter_unlock(struct celockstate *cel)
{

	cache_unlock_buckets_cel(cel);
	cache_unlock_vnodes_cel(cel);
}

static void __noinline
cache_enter_dotdot_prep(struct vnode *dvp, struct vnode *vp,
    struct componentname *cnp)
{
	struct celockstate cel;
	struct namecache *ncp;
	uint32_t hash;
	int len;

	if (dvp->v_cache_dd == NULL)
		return;
	len = cnp->cn_namelen;
	cache_celockstate_init(&cel);
	hash = cache_get_hash(cnp->cn_nameptr, len, dvp);
	cache_enter_lock_dd(&cel, dvp, vp, hash);
	ncp = dvp->v_cache_dd;
	if (ncp != NULL && (ncp->nc_flag & NCF_ISDOTDOT)) {
		KASSERT(ncp->nc_dvp == dvp, ("wrong isdotdot parent"));
		cache_zap_locked(ncp, false);
	} else {
		ncp = NULL;
	}
	dvp->v_cache_dd = NULL;
	cache_enter_unlock(&cel);
	cache_free(ncp);
}

/*
 * Add an entry to the cache.
 */
void
cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
    struct timespec *tsp, struct timespec *dtsp)
{
	struct celockstate cel;
	struct namecache *ncp, *n2, *ndd;
	struct namecache_ts *ncp_ts, *n2_ts;
	struct nchashhead *ncpp;
	uint32_t hash;
	int flag;
	int len;
	u_long lnumcache;

	CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
	VNASSERT(vp == NULL || !VN_IS_DOOMED(vp), vp,
	    ("cache_enter: Adding a doomed vnode"));
	VNASSERT(dvp == NULL || !VN_IS_DOOMED(dvp), dvp,
	    ("cache_enter: Doomed vnode used as src"));

#ifdef DEBUG_CACHE
	if (__predict_false(!doingcache))
		return;
#endif

	flag = 0;
	if (__predict_false(cnp->cn_nameptr[0] == '.')) {
		if (cnp->cn_namelen == 1)
			return;
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			cache_enter_dotdot_prep(dvp, vp, cnp);
			flag = NCF_ISDOTDOT;
		}
	}

	/*
	 * Avoid blowout in namecache entries.
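	 * The limit is ncsize (desiredvnodes * ncsizefactor); the speculative
	 * increment of numcache is backed out if the limit would be exceeded.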
	 */
	lnumcache = atomic_fetchadd_long(&numcache, 1) + 1;
	if (__predict_false(lnumcache >= ncsize)) {
		atomic_add_long(&numcache, -1);
		return;
	}

	cache_celockstate_init(&cel);
	ndd = NULL;
	ncp_ts = NULL;

	/*
	 * Calculate the hash key and setup as much of the new
	 * namecache entry as possible before acquiring the lock.
	 */
	ncp = cache_alloc(cnp->cn_namelen, tsp != NULL);
	ncp->nc_flag = flag;
	ncp->nc_vp = vp;
	if (vp == NULL)
		ncp->nc_flag |= NCF_NEGATIVE;
	ncp->nc_dvp = dvp;
	if (tsp != NULL) {
		ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
		ncp_ts->nc_time = *tsp;
		ncp_ts->nc_ticks = ticks;
		ncp_ts->nc_nc.nc_flag |= NCF_TS;
		if (dtsp != NULL) {
			ncp_ts->nc_dotdottime = *dtsp;
			ncp_ts->nc_nc.nc_flag |= NCF_DTS;
		}
	}
	len = ncp->nc_nlen = cnp->cn_namelen;
	hash = cache_get_hash(cnp->cn_nameptr, len, dvp);
	strlcpy(ncp->nc_name, cnp->cn_nameptr, len + 1);
	cache_enter_lock(&cel, dvp, vp, hash);

	/*
	 * See if this vnode or negative entry is already in the cache
	 * with this name.  This can happen with concurrent lookups of
	 * the same path name.
	 */
	ncpp = NCHHASH(hash);
	LIST_FOREACH(n2, ncpp, nc_hash) {
		if (n2->nc_dvp == dvp &&
		    n2->nc_nlen == cnp->cn_namelen &&
		    !bcmp(n2->nc_name, cnp->cn_nameptr, n2->nc_nlen)) {
			if (tsp != NULL) {
				KASSERT((n2->nc_flag & NCF_TS) != 0,
				    ("no NCF_TS"));
				n2_ts = __containerof(n2, struct namecache_ts, nc_nc);
				n2_ts->nc_time = ncp_ts->nc_time;
				n2_ts->nc_ticks = ncp_ts->nc_ticks;
				if (dtsp != NULL) {
					n2_ts->nc_dotdottime = ncp_ts->nc_dotdottime;
					if (ncp->nc_flag & NCF_NEGATIVE)
						mtx_lock(&ncneg_hot.nl_lock);
					n2_ts->nc_nc.nc_flag |= NCF_DTS;
					if (ncp->nc_flag & NCF_NEGATIVE)
						mtx_unlock(&ncneg_hot.nl_lock);
				}
			}
			goto out_unlock_free;
		}
	}

	if (flag == NCF_ISDOTDOT) {
		/*
		 * See if we are trying to add .. entry, but some other lookup
		 * has populated v_cache_dd pointer already.
		 */
		if (dvp->v_cache_dd != NULL)
			goto out_unlock_free;
		KASSERT(vp == NULL || vp->v_type == VDIR,
		    ("wrong vnode type %p", vp));
		dvp->v_cache_dd = ncp;
	}

	if (vp != NULL) {
		if (vp->v_type == VDIR) {
			if (flag != NCF_ISDOTDOT) {
				/*
				 * For this case, the cache entry maps both the
				 * directory name in it and the name ".." for the
				 * directory's parent.
				 */
				if ((ndd = vp->v_cache_dd) != NULL) {
					if ((ndd->nc_flag & NCF_ISDOTDOT) != 0)
						cache_zap_locked(ndd, false);
					else
						ndd = NULL;
				}
				vp->v_cache_dd = ncp;
			}
		} else {
			vp->v_cache_dd = NULL;
		}
	}

	if (flag != NCF_ISDOTDOT) {
		if (LIST_EMPTY(&dvp->v_cache_src)) {
			vhold(dvp);
			counter_u64_add(numcachehv, 1);
		}
		LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
	}

	/*
	 * Insert the new namecache entry into the appropriate chain
	 * within the cache entries table.
	 */
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);

	/*
	 * If the entry is "negative", we place it into the
	 * "negative" cache queue, otherwise, we place it into the
	 * destination vnode's cache entries queue.
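	 * A whiteout (ISWHITEOUT in cn_flags) is recorded with the NCF_WHITE
	 * flag so that a later negative hit can set ISWHITEOUT again for the
	 * caller.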
1845 */ 1846 if (vp != NULL) { 1847 TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst); 1848 SDT_PROBE3(vfs, namecache, enter, done, dvp, ncp->nc_name, 1849 vp); 1850 } else { 1851 if (cnp->cn_flags & ISWHITEOUT) 1852 ncp->nc_flag |= NCF_WHITE; 1853 cache_negative_insert(ncp, false); 1854 SDT_PROBE2(vfs, namecache, enter_negative, done, dvp, 1855 ncp->nc_name); 1856 } 1857 cache_enter_unlock(&cel); 1858 if (numneg * ncnegfactor > lnumcache) 1859 cache_negative_zap_one(); 1860 cache_free(ndd); 1861 return; 1862 out_unlock_free: 1863 cache_enter_unlock(&cel); 1864 cache_free(ncp); 1865 return; 1866 } 1867 1868 static u_int 1869 cache_roundup_2(u_int val) 1870 { 1871 u_int res; 1872 1873 for (res = 1; res <= val; res <<= 1) 1874 continue; 1875 1876 return (res); 1877 } 1878 1879 /* 1880 * Name cache initialization, from vfs_init() when we are booting 1881 */ 1882 static void 1883 nchinit(void *dummy __unused) 1884 { 1885 u_int i; 1886 1887 cache_zone_small = uma_zcreate("S VFS Cache", 1888 sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1, 1889 NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache), 1890 UMA_ZONE_ZINIT); 1891 cache_zone_small_ts = uma_zcreate("STS VFS Cache", 1892 sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1, 1893 NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache_ts), 1894 UMA_ZONE_ZINIT); 1895 cache_zone_large = uma_zcreate("L VFS Cache", 1896 sizeof(struct namecache) + NAME_MAX + 1, 1897 NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache), 1898 UMA_ZONE_ZINIT); 1899 cache_zone_large_ts = uma_zcreate("LTS VFS Cache", 1900 sizeof(struct namecache_ts) + NAME_MAX + 1, 1901 NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct namecache_ts), 1902 UMA_ZONE_ZINIT); 1903 1904 ncsize = desiredvnodes * ncsizefactor; 1905 nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash); 1906 ncbuckethash = cache_roundup_2(mp_ncpus * mp_ncpus) - 1; 1907 if (ncbuckethash < 7) /* arbitrarily chosen to avoid having one lock */ 1908 ncbuckethash = 7; 1909 if (ncbuckethash > nchash) 1910 ncbuckethash = nchash; 1911 bucketlocks = malloc(sizeof(*bucketlocks) * numbucketlocks, M_VFSCACHE, 1912 M_WAITOK | M_ZERO); 1913 for (i = 0; i < numbucketlocks; i++) 1914 rw_init_flags(&bucketlocks[i], "ncbuc", RW_DUPOK | RW_RECURSE); 1915 ncvnodehash = ncbuckethash; 1916 vnodelocks = malloc(sizeof(*vnodelocks) * numvnodelocks, M_VFSCACHE, 1917 M_WAITOK | M_ZERO); 1918 for (i = 0; i < numvnodelocks; i++) 1919 mtx_init(&vnodelocks[i], "ncvn", NULL, MTX_DUPOK | MTX_RECURSE); 1920 ncpurgeminvnodes = numbucketlocks * 2; 1921 1922 ncneghash = 3; 1923 neglists = malloc(sizeof(*neglists) * numneglists, M_VFSCACHE, 1924 M_WAITOK | M_ZERO); 1925 for (i = 0; i < numneglists; i++) { 1926 mtx_init(&neglists[i].nl_lock, "ncnegl", NULL, MTX_DEF); 1927 TAILQ_INIT(&neglists[i].nl_list); 1928 } 1929 mtx_init(&ncneg_hot.nl_lock, "ncneglh", NULL, MTX_DEF); 1930 TAILQ_INIT(&ncneg_hot.nl_list); 1931 1932 mtx_init(&ncneg_shrink_lock, "ncnegs", NULL, MTX_DEF); 1933 1934 numcachehv = counter_u64_alloc(M_WAITOK); 1935 numcalls = counter_u64_alloc(M_WAITOK); 1936 dothits = counter_u64_alloc(M_WAITOK); 1937 dotdothits = counter_u64_alloc(M_WAITOK); 1938 numchecks = counter_u64_alloc(M_WAITOK); 1939 nummiss = counter_u64_alloc(M_WAITOK); 1940 nummisszap = counter_u64_alloc(M_WAITOK); 1941 numposzaps = counter_u64_alloc(M_WAITOK); 1942 numposhits = counter_u64_alloc(M_WAITOK); 1943 numnegzaps = counter_u64_alloc(M_WAITOK); 1944 numneghits = counter_u64_alloc(M_WAITOK); 1945 numfullpathcalls = counter_u64_alloc(M_WAITOK); 1946 
numfullpathfail1 = counter_u64_alloc(M_WAITOK); 1947 numfullpathfail2 = counter_u64_alloc(M_WAITOK); 1948 numfullpathfail4 = counter_u64_alloc(M_WAITOK); 1949 numfullpathfound = counter_u64_alloc(M_WAITOK); 1950 zap_and_exit_bucket_relock_success = counter_u64_alloc(M_WAITOK); 1951 numneg_evicted = counter_u64_alloc(M_WAITOK); 1952 shrinking_skipped = counter_u64_alloc(M_WAITOK); 1953 } 1954 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL); 1955 1956 void 1957 cache_changesize(u_long newmaxvnodes) 1958 { 1959 struct nchashhead *new_nchashtbl, *old_nchashtbl; 1960 u_long new_nchash, old_nchash; 1961 struct namecache *ncp; 1962 uint32_t hash; 1963 u_long newncsize; 1964 int i; 1965 1966 newncsize = newmaxvnodes * ncsizefactor; 1967 newmaxvnodes = cache_roundup_2(newmaxvnodes * 2); 1968 if (newmaxvnodes < numbucketlocks) 1969 newmaxvnodes = numbucketlocks; 1970 1971 new_nchashtbl = hashinit(newmaxvnodes, M_VFSCACHE, &new_nchash); 1972 /* If same hash table size, nothing to do */ 1973 if (nchash == new_nchash) { 1974 free(new_nchashtbl, M_VFSCACHE); 1975 return; 1976 } 1977 /* 1978 * Move everything from the old hash table to the new table. 1979 * None of the namecache entries in the table can be removed 1980 * because to do so, they have to be removed from the hash table. 1981 */ 1982 cache_lock_all_vnodes(); 1983 cache_lock_all_buckets(); 1984 old_nchashtbl = nchashtbl; 1985 old_nchash = nchash; 1986 nchashtbl = new_nchashtbl; 1987 nchash = new_nchash; 1988 for (i = 0; i <= old_nchash; i++) { 1989 while ((ncp = LIST_FIRST(&old_nchashtbl[i])) != NULL) { 1990 hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, 1991 ncp->nc_dvp); 1992 LIST_REMOVE(ncp, nc_hash); 1993 LIST_INSERT_HEAD(NCHHASH(hash), ncp, nc_hash); 1994 } 1995 } 1996 ncsize = newncsize; 1997 cache_unlock_all_buckets(); 1998 cache_unlock_all_vnodes(); 1999 free(old_nchashtbl, M_VFSCACHE); 2000 } 2001 2002 /* 2003 * Invalidate all entries from and to a particular vnode. 2004 */ 2005 void 2006 cache_purge(struct vnode *vp) 2007 { 2008 TAILQ_HEAD(, namecache) ncps; 2009 struct namecache *ncp, *nnp; 2010 struct mtx *vlp, *vlp2; 2011 2012 CTR1(KTR_VFS, "cache_purge(%p)", vp); 2013 SDT_PROBE1(vfs, namecache, purge, done, vp); 2014 if (LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) && 2015 vp->v_cache_dd == NULL) 2016 return; 2017 TAILQ_INIT(&ncps); 2018 vlp = VP2VNODELOCK(vp); 2019 vlp2 = NULL; 2020 mtx_lock(vlp); 2021 retry: 2022 while (!LIST_EMPTY(&vp->v_cache_src)) { 2023 ncp = LIST_FIRST(&vp->v_cache_src); 2024 if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2)) 2025 goto retry; 2026 TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst); 2027 } 2028 while (!TAILQ_EMPTY(&vp->v_cache_dst)) { 2029 ncp = TAILQ_FIRST(&vp->v_cache_dst); 2030 if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2)) 2031 goto retry; 2032 TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst); 2033 } 2034 ncp = vp->v_cache_dd; 2035 if (ncp != NULL) { 2036 KASSERT(ncp->nc_flag & NCF_ISDOTDOT, 2037 ("lost dotdot link")); 2038 if (!cache_zap_locked_vnode_kl2(ncp, vp, &vlp2)) 2039 goto retry; 2040 TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst); 2041 } 2042 KASSERT(vp->v_cache_dd == NULL, ("incomplete purge")); 2043 mtx_unlock(vlp); 2044 if (vlp2 != NULL) 2045 mtx_unlock(vlp2); 2046 TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) { 2047 cache_free(ncp); 2048 } 2049 } 2050 2051 /* 2052 * Invalidate all negative entries for a particular directory vnode. 
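 *
 * A filesystem typically calls this after it has created a new name under
 * the directory (e.g. on create or rename), so that stale negative entries
 * cannot shadow the new object.  Illustrative sketch only; somefs_mknode()
 * is hypothetical:
 *
 *	error = somefs_mknode(dvp, &vp, cnp);
 *	if (error == 0)
 *		cache_purge_negative(dvp);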
2053 */ 2054 void 2055 cache_purge_negative(struct vnode *vp) 2056 { 2057 TAILQ_HEAD(, namecache) ncps; 2058 struct namecache *ncp, *nnp; 2059 struct mtx *vlp; 2060 2061 CTR1(KTR_VFS, "cache_purge_negative(%p)", vp); 2062 SDT_PROBE1(vfs, namecache, purge_negative, done, vp); 2063 if (LIST_EMPTY(&vp->v_cache_src)) 2064 return; 2065 TAILQ_INIT(&ncps); 2066 vlp = VP2VNODELOCK(vp); 2067 mtx_lock(vlp); 2068 LIST_FOREACH_SAFE(ncp, &vp->v_cache_src, nc_src, nnp) { 2069 if (!(ncp->nc_flag & NCF_NEGATIVE)) 2070 continue; 2071 cache_zap_negative_locked_vnode_kl(ncp, vp); 2072 TAILQ_INSERT_TAIL(&ncps, ncp, nc_dst); 2073 } 2074 mtx_unlock(vlp); 2075 TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) { 2076 cache_free(ncp); 2077 } 2078 } 2079 2080 /* 2081 * Flush all entries referencing a particular filesystem. 2082 */ 2083 void 2084 cache_purgevfs(struct mount *mp, bool force) 2085 { 2086 TAILQ_HEAD(, namecache) ncps; 2087 struct mtx *vlp1, *vlp2; 2088 struct rwlock *blp; 2089 struct nchashhead *bucket; 2090 struct namecache *ncp, *nnp; 2091 u_long i, j, n_nchash; 2092 int error; 2093 2094 /* Scan hash tables for applicable entries */ 2095 SDT_PROBE1(vfs, namecache, purgevfs, done, mp); 2096 if (!force && mp->mnt_nvnodelistsize <= ncpurgeminvnodes) 2097 return; 2098 TAILQ_INIT(&ncps); 2099 n_nchash = nchash + 1; 2100 vlp1 = vlp2 = NULL; 2101 for (i = 0; i < numbucketlocks; i++) { 2102 blp = (struct rwlock *)&bucketlocks[i]; 2103 rw_wlock(blp); 2104 for (j = i; j < n_nchash; j += numbucketlocks) { 2105 retry: 2106 bucket = &nchashtbl[j]; 2107 LIST_FOREACH_SAFE(ncp, bucket, nc_hash, nnp) { 2108 cache_assert_bucket_locked(ncp, RA_WLOCKED); 2109 if (ncp->nc_dvp->v_mount != mp) 2110 continue; 2111 error = cache_zap_wlocked_bucket_kl(ncp, blp, 2112 &vlp1, &vlp2); 2113 if (error != 0) 2114 goto retry; 2115 TAILQ_INSERT_HEAD(&ncps, ncp, nc_dst); 2116 } 2117 } 2118 rw_wunlock(blp); 2119 if (vlp1 == NULL && vlp2 == NULL) 2120 cache_maybe_yield(); 2121 } 2122 if (vlp1 != NULL) 2123 mtx_unlock(vlp1); 2124 if (vlp2 != NULL) 2125 mtx_unlock(vlp2); 2126 2127 TAILQ_FOREACH_SAFE(ncp, &ncps, nc_dst, nnp) { 2128 cache_free(ncp); 2129 } 2130 } 2131 2132 /* 2133 * Perform canonical checks and cache lookup and pass on to filesystem 2134 * through the vop_cachedlookup only if needed. 2135 */ 2136 2137 int 2138 vfs_cache_lookup(struct vop_lookup_args *ap) 2139 { 2140 struct vnode *dvp; 2141 int error; 2142 struct vnode **vpp = ap->a_vpp; 2143 struct componentname *cnp = ap->a_cnp; 2144 struct ucred *cred = cnp->cn_cred; 2145 int flags = cnp->cn_flags; 2146 struct thread *td = cnp->cn_thread; 2147 2148 *vpp = NULL; 2149 dvp = ap->a_dvp; 2150 2151 if (dvp->v_type != VDIR) 2152 return (ENOTDIR); 2153 2154 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && 2155 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) 2156 return (EROFS); 2157 2158 error = VOP_ACCESS(dvp, VEXEC, cred, td); 2159 if (error) 2160 return (error); 2161 2162 error = cache_lookup(dvp, vpp, cnp, NULL, NULL); 2163 if (error == 0) 2164 return (VOP_CACHEDLOOKUP(dvp, vpp, cnp)); 2165 if (error == -1) 2166 return (0); 2167 return (error); 2168 } 2169 2170 /* 2171 * XXX All of these sysctls would probably be more productive dead. 2172 */ 2173 static int __read_mostly disablecwd; 2174 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, 2175 "Disable the getcwd syscall"); 2176 2177 /* Implementation of the getcwd syscall. 
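
   From userland this is normally reached via getcwd(3), which tries the
   __getcwd() system call first and falls back to resolving the path itself
   if the kernel cannot.  Illustrative caller:

	char buf[MAXPATHLEN];

	if (getcwd(buf, sizeof(buf)) == NULL)
		err(1, "getcwd");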
*/ 2178 int 2179 sys___getcwd(struct thread *td, struct __getcwd_args *uap) 2180 { 2181 2182 return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen, 2183 MAXPATHLEN)); 2184 } 2185 2186 int 2187 kern___getcwd(struct thread *td, char *buf, enum uio_seg bufseg, size_t buflen, 2188 size_t path_max) 2189 { 2190 char *bp, *tmpbuf; 2191 struct filedesc *fdp; 2192 struct vnode *cdir, *rdir; 2193 int error; 2194 2195 if (__predict_false(disablecwd)) 2196 return (ENODEV); 2197 if (__predict_false(buflen < 2)) 2198 return (EINVAL); 2199 if (buflen > path_max) 2200 buflen = path_max; 2201 2202 tmpbuf = malloc(buflen, M_TEMP, M_WAITOK); 2203 fdp = td->td_proc->p_fd; 2204 FILEDESC_SLOCK(fdp); 2205 cdir = fdp->fd_cdir; 2206 vrefact(cdir); 2207 rdir = fdp->fd_rdir; 2208 vrefact(rdir); 2209 FILEDESC_SUNLOCK(fdp); 2210 error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen); 2211 vrele(rdir); 2212 vrele(cdir); 2213 2214 if (!error) { 2215 if (bufseg == UIO_SYSSPACE) 2216 bcopy(bp, buf, strlen(bp) + 1); 2217 else 2218 error = copyout(bp, buf, strlen(bp) + 1); 2219 #ifdef KTRACE 2220 if (KTRPOINT(curthread, KTR_NAMEI)) 2221 ktrnamei(bp); 2222 #endif 2223 } 2224 free(tmpbuf, M_TEMP); 2225 return (error); 2226 } 2227 2228 /* 2229 * Thus begins the fullpath magic. 2230 */ 2231 2232 static int __read_mostly disablefullpath; 2233 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0, 2234 "Disable the vn_fullpath function"); 2235 2236 /* 2237 * Retrieve the full filesystem path that corresponds to a vnode from the name 2238 * cache (if available) 2239 */ 2240 int 2241 vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf) 2242 { 2243 char *buf; 2244 struct filedesc *fdp; 2245 struct vnode *rdir; 2246 int error; 2247 2248 if (__predict_false(disablefullpath)) 2249 return (ENODEV); 2250 if (__predict_false(vn == NULL)) 2251 return (EINVAL); 2252 2253 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); 2254 fdp = td->td_proc->p_fd; 2255 FILEDESC_SLOCK(fdp); 2256 rdir = fdp->fd_rdir; 2257 vrefact(rdir); 2258 FILEDESC_SUNLOCK(fdp); 2259 error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN); 2260 vrele(rdir); 2261 2262 if (!error) 2263 *freebuf = buf; 2264 else 2265 free(buf, M_TEMP); 2266 return (error); 2267 } 2268 2269 /* 2270 * This function is similar to vn_fullpath, but it attempts to look up the 2271 * pathname relative to the global root mount point. This is required for the 2272 * auditing sub-system, as audited pathnames must be absolute, relative to the 2273 * global root mount point.
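 *
 * In-kernel consumers of the vn_fullpath() family follow a common pattern;
 * a minimal sketch (illustrative only, assuming a referenced vnode vp):
 *
 *	char *fullpath, *freepath;
 *
 *	freepath = NULL;
 *	if (vn_fullpath(curthread, vp, &fullpath, &freepath) == 0)
 *		printf("path: %s\n", fullpath);
 *	if (freepath != NULL)
 *		free(freepath, M_TEMP);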
2274 */ 2275 int 2276 vn_fullpath_global(struct thread *td, struct vnode *vn, 2277 char **retbuf, char **freebuf) 2278 { 2279 char *buf; 2280 int error; 2281 2282 if (__predict_false(disablefullpath)) 2283 return (ENODEV); 2284 if (__predict_false(vn == NULL)) 2285 return (EINVAL); 2286 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); 2287 error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN); 2288 if (!error) 2289 *freebuf = buf; 2290 else 2291 free(buf, M_TEMP); 2292 return (error); 2293 } 2294 2295 int 2296 vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen) 2297 { 2298 struct vnode *dvp; 2299 struct namecache *ncp; 2300 struct mtx *vlp; 2301 int error; 2302 2303 vlp = VP2VNODELOCK(*vp); 2304 mtx_lock(vlp); 2305 TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) { 2306 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 2307 break; 2308 } 2309 if (ncp != NULL) { 2310 if (*buflen < ncp->nc_nlen) { 2311 mtx_unlock(vlp); 2312 vrele(*vp); 2313 counter_u64_add(numfullpathfail4, 1); 2314 error = ENOMEM; 2315 SDT_PROBE3(vfs, namecache, fullpath, return, error, 2316 vp, NULL); 2317 return (error); 2318 } 2319 *buflen -= ncp->nc_nlen; 2320 memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen); 2321 SDT_PROBE3(vfs, namecache, fullpath, hit, ncp->nc_dvp, 2322 ncp->nc_name, vp); 2323 dvp = *vp; 2324 *vp = ncp->nc_dvp; 2325 vref(*vp); 2326 mtx_unlock(vlp); 2327 vrele(dvp); 2328 return (0); 2329 } 2330 SDT_PROBE1(vfs, namecache, fullpath, miss, vp); 2331 2332 mtx_unlock(vlp); 2333 vn_lock(*vp, LK_SHARED | LK_RETRY); 2334 error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen); 2335 vput(*vp); 2336 if (error) { 2337 counter_u64_add(numfullpathfail2, 1); 2338 SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL); 2339 return (error); 2340 } 2341 2342 *vp = dvp; 2343 if (VN_IS_DOOMED(dvp)) { 2344 /* forced unmount */ 2345 vrele(dvp); 2346 error = ENOENT; 2347 SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL); 2348 return (error); 2349 } 2350 /* 2351 * *vp has its use count incremented still. 2352 */ 2353 2354 return (0); 2355 } 2356 2357 /* 2358 * The magic behind kern___getcwd() and vn_fullpath(). 2359 */ 2360 static int 2361 vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir, 2362 char *buf, char **retbuf, u_int buflen) 2363 { 2364 int error, slash_prefixed; 2365 #ifdef KDTRACE_HOOKS 2366 struct vnode *startvp = vp; 2367 #endif 2368 struct vnode *vp1; 2369 2370 buflen--; 2371 buf[buflen] = '\0'; 2372 error = 0; 2373 slash_prefixed = 0; 2374 2375 SDT_PROBE1(vfs, namecache, fullpath, entry, vp); 2376 counter_u64_add(numfullpathcalls, 1); 2377 vref(vp); 2378 if (vp->v_type != VDIR) { 2379 error = vn_vptocnp(&vp, td->td_ucred, buf, &buflen); 2380 if (error) 2381 return (error); 2382 if (buflen == 0) { 2383 vrele(vp); 2384 return (ENOMEM); 2385 } 2386 buf[--buflen] = '/'; 2387 slash_prefixed = 1; 2388 } 2389 while (vp != rdir && vp != rootvnode) { 2390 /* 2391 * The vp vnode must be already fully constructed, 2392 * since it is either found in namecache or obtained 2393 * from VOP_VPTOCNP(). We may test for VV_ROOT safely 2394 * without obtaining the vnode lock. 2395 */ 2396 if ((vp->v_vflag & VV_ROOT) != 0) { 2397 vn_lock(vp, LK_RETRY | LK_SHARED); 2398 2399 /* 2400 * With the vnode locked, check for races with 2401 * unmount, forced or not. Note that we 2402 * already verified that vp is not equal to 2403 * the root vnode, which means that 2404 * mnt_vnodecovered can be NULL only for the 2405 * case of unmount. 
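 *
 * For example, when a separate filesystem is mounted on /usr/local,
 * walking up from a vnode inside it eventually reaches that filesystem's
 * root vnode (VV_ROOT is set); the code below then continues from the
 * covered /usr/local vnode in the parent filesystem, found through
 * mnt_vnodecovered, instead of stopping at the mount boundary.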
2406 */ 2407 if (VN_IS_DOOMED(vp) || 2408 (vp1 = vp->v_mount->mnt_vnodecovered) == NULL || 2409 vp1->v_mountedhere != vp->v_mount) { 2410 vput(vp); 2411 error = ENOENT; 2412 SDT_PROBE3(vfs, namecache, fullpath, return, 2413 error, vp, NULL); 2414 break; 2415 } 2416 2417 vref(vp1); 2418 vput(vp); 2419 vp = vp1; 2420 continue; 2421 } 2422 if (vp->v_type != VDIR) { 2423 vrele(vp); 2424 counter_u64_add(numfullpathfail1, 1); 2425 error = ENOTDIR; 2426 SDT_PROBE3(vfs, namecache, fullpath, return, 2427 error, vp, NULL); 2428 break; 2429 } 2430 error = vn_vptocnp(&vp, td->td_ucred, buf, &buflen); 2431 if (error) 2432 break; 2433 if (buflen == 0) { 2434 vrele(vp); 2435 error = ENOMEM; 2436 SDT_PROBE3(vfs, namecache, fullpath, return, error, 2437 startvp, NULL); 2438 break; 2439 } 2440 buf[--buflen] = '/'; 2441 slash_prefixed = 1; 2442 } 2443 if (error) 2444 return (error); 2445 if (!slash_prefixed) { 2446 if (buflen == 0) { 2447 vrele(vp); 2448 counter_u64_add(numfullpathfail4, 1); 2449 SDT_PROBE3(vfs, namecache, fullpath, return, ENOMEM, 2450 startvp, NULL); 2451 return (ENOMEM); 2452 } 2453 buf[--buflen] = '/'; 2454 } 2455 counter_u64_add(numfullpathfound, 1); 2456 vrele(vp); 2457 2458 SDT_PROBE3(vfs, namecache, fullpath, return, 0, startvp, buf + buflen); 2459 *retbuf = buf + buflen; 2460 return (0); 2461 } 2462 2463 struct vnode * 2464 vn_dir_dd_ino(struct vnode *vp) 2465 { 2466 struct namecache *ncp; 2467 struct vnode *ddvp; 2468 struct mtx *vlp; 2469 enum vgetstate vs; 2470 2471 ASSERT_VOP_LOCKED(vp, "vn_dir_dd_ino"); 2472 vlp = VP2VNODELOCK(vp); 2473 mtx_lock(vlp); 2474 TAILQ_FOREACH(ncp, &(vp->v_cache_dst), nc_dst) { 2475 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) 2476 continue; 2477 ddvp = ncp->nc_dvp; 2478 vs = vget_prep(ddvp); 2479 mtx_unlock(vlp); 2480 if (vget_finish(ddvp, LK_SHARED | LK_NOWAIT, vs)) 2481 return (NULL); 2482 return (ddvp); 2483 } 2484 mtx_unlock(vlp); 2485 return (NULL); 2486 } 2487 2488 int 2489 vn_commname(struct vnode *vp, char *buf, u_int buflen) 2490 { 2491 struct namecache *ncp; 2492 struct mtx *vlp; 2493 int l; 2494 2495 vlp = VP2VNODELOCK(vp); 2496 mtx_lock(vlp); 2497 TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst) 2498 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 2499 break; 2500 if (ncp == NULL) { 2501 mtx_unlock(vlp); 2502 return (ENOENT); 2503 } 2504 l = min(ncp->nc_nlen, buflen - 1); 2505 memcpy(buf, ncp->nc_name, l); 2506 mtx_unlock(vlp); 2507 buf[l] = '\0'; 2508 return (0); 2509 } 2510 2511 /* 2512 * This function updates the path string to the vnode's full global path 2513 * and checks the size of the new path string against the pathlen argument. 2514 * 2515 * Requires a locked, referenced vnode. 2516 * Vnode is re-locked on success or ENODEV, otherwise unlocked. 2517 * 2518 * If sysctl debug.disablefullpath is set, ENODEV is returned, 2519 * the vnode is left locked and the path remains untouched. 2520 * 2521 * If vp is a directory, the call to vn_fullpath_global() always succeeds 2522 * because it falls back to the ".." lookup if the namecache lookup fails. 2523 */ 2524 int 2525 vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path, 2526 u_int pathlen) 2527 { 2528 struct nameidata nd; 2529 struct vnode *vp1; 2530 char *rpath, *fbuf; 2531 int error; 2532 2533 ASSERT_VOP_ELOCKED(vp, __func__); 2534 2535 /* Return ENODEV if sysctl debug.disablefullpath==1 */ 2536 if (__predict_false(disablefullpath)) 2537 return (ENODEV); 2538 2539 /* Construct global filesystem path from vp.
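   The vnode is unlocked first because the vn_fullpath_global() call below
   may need to lock vnodes while walking towards the root; on success it is
   locked again by the LOCKLEAF namei() lookup further down.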
*/ 2540 VOP_UNLOCK(vp); 2541 error = vn_fullpath_global(td, vp, &rpath, &fbuf); 2542 2543 if (error != 0) { 2544 vrele(vp); 2545 return (error); 2546 } 2547 2548 if (strlen(rpath) >= pathlen) { 2549 vrele(vp); 2550 error = ENAMETOOLONG; 2551 goto out; 2552 } 2553 2554 /* 2555 * Re-lookup the vnode by path to detect a possible rename. 2556 * As a side effect, the vnode is relocked. 2557 * If vnode was renamed, return ENOENT. 2558 */ 2559 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1, 2560 UIO_SYSSPACE, path, td); 2561 error = namei(&nd); 2562 if (error != 0) { 2563 vrele(vp); 2564 goto out; 2565 } 2566 NDFREE(&nd, NDF_ONLY_PNBUF); 2567 vp1 = nd.ni_vp; 2568 vrele(vp); 2569 if (vp1 == vp) 2570 strcpy(path, rpath); 2571 else { 2572 vput(vp1); 2573 error = ENOENT; 2574 } 2575 2576 out: 2577 free(fbuf, M_TEMP); 2578 return (error); 2579 } 2580 2581 #ifdef DDB 2582 static void 2583 db_print_vpath(struct vnode *vp) 2584 { 2585 2586 while (vp != NULL) { 2587 db_printf("%p: ", vp); 2588 if (vp == rootvnode) { 2589 db_printf("/"); 2590 vp = NULL; 2591 } else { 2592 if (vp->v_vflag & VV_ROOT) { 2593 db_printf("<mount point>"); 2594 vp = vp->v_mount->mnt_vnodecovered; 2595 } else { 2596 struct namecache *ncp; 2597 char *ncn; 2598 int i; 2599 2600 ncp = TAILQ_FIRST(&vp->v_cache_dst); 2601 if (ncp != NULL) { 2602 ncn = ncp->nc_name; 2603 for (i = 0; i < ncp->nc_nlen; i++) 2604 db_printf("%c", *ncn++); 2605 vp = ncp->nc_dvp; 2606 } else { 2607 vp = NULL; 2608 } 2609 } 2610 } 2611 db_printf("\n"); 2612 } 2613 2614 return; 2615 } 2616 2617 DB_SHOW_COMMAND(vpath, db_show_vpath) 2618 { 2619 struct vnode *vp; 2620 2621 if (!have_addr) { 2622 db_printf("usage: show vpath <struct vnode *>\n"); 2623 return; 2624 } 2625 2626 vp = (struct vnode *)addr; 2627 db_print_vpath(vp); 2628 } 2629 2630 #endif 2631
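
/*
 * Usage note (illustrative, not part of the original file): a filesystem
 * opts into the name cache by routing its lookup VOP through
 * vfs_cache_lookup() above and supplying its uncached lookup routine as
 * vop_cachedlookup, e.g. for a hypothetical "somefs" modeled on existing
 * in-tree filesystems:
 *
 *	struct vop_vector somefs_vnodeops = {
 *		.vop_default		= &default_vnodeops,
 *		.vop_lookup		= vfs_cache_lookup,
 *		.vop_cachedlookup	= somefs_lookup,
 *	};
 *
 * vfs_cache_lookup() consults cache_lookup() and only calls
 * VOP_CACHEDLOOKUP() (here, somefs_lookup()) on a cache miss.
 */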