1 /*- 2 * Copyright (c) 1989, 1993, 1995 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Poul-Henning Kamp of the FreeBSD Project. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 4. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95 33 */ 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include "opt_ktrace.h" 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/counter.h> 43 #include <sys/filedesc.h> 44 #include <sys/fnv_hash.h> 45 #include <sys/kernel.h> 46 #include <sys/lock.h> 47 #include <sys/malloc.h> 48 #include <sys/fcntl.h> 49 #include <sys/mount.h> 50 #include <sys/namei.h> 51 #include <sys/proc.h> 52 #include <sys/rwlock.h> 53 #include <sys/sdt.h> 54 #include <sys/syscallsubr.h> 55 #include <sys/sysctl.h> 56 #include <sys/sysproto.h> 57 #include <sys/vnode.h> 58 #ifdef KTRACE 59 #include <sys/ktrace.h> 60 #endif 61 62 #include <vm/uma.h> 63 64 SDT_PROVIDER_DECLARE(vfs); 65 SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *", 66 "struct vnode *"); 67 SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *", 68 "char *"); 69 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *"); 70 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *", 71 "char *", "struct vnode *"); 72 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *"); 73 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int", 74 "struct vnode *", "char *"); 75 SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *", 76 "struct vnode *"); 77 SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit__negative, 78 "struct vnode *", "char *"); 79 SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *", 80 "char *"); 81 SDT_PROBE_DEFINE1(vfs, namecache, purge, done, "struct vnode *"); 82 SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *"); 83 SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *"); 84 SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *", 85 "struct vnode *"); 86 SDT_PROBE_DEFINE2(vfs, namecache, zap_negative, done, "struct vnode *", 87 "char *"); 88 89 /* 90 * This structure describes the elements in the cache of recent 91 * names looked up by namei. 92 */ 93 94 struct namecache { 95 LIST_ENTRY(namecache) nc_hash; /* hash chain */ 96 LIST_ENTRY(namecache) nc_src; /* source vnode list */ 97 TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */ 98 struct vnode *nc_dvp; /* vnode of parent of name */ 99 struct vnode *nc_vp; /* vnode the name refers to */ 100 u_char nc_flag; /* flag bits */ 101 u_char nc_nlen; /* length of name */ 102 char nc_name[0]; /* segment name + nul */ 103 }; 104 105 /* 106 * struct namecache_ts repeats struct namecache layout up to the 107 * nc_nlen member. 108 * struct namecache_ts is used in place of struct namecache when time(s) need 109 * to be stored. The nc_dotdottime field is used when a cache entry is mapping 110 * both a non-dotdot directory name plus dotdot for the directory's 111 * parent. 112 */ 113 struct namecache_ts { 114 LIST_ENTRY(namecache) nc_hash; /* hash chain */ 115 LIST_ENTRY(namecache) nc_src; /* source vnode list */ 116 TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */ 117 struct vnode *nc_dvp; /* vnode of parent of name */ 118 struct vnode *nc_vp; /* vnode the name refers to */ 119 u_char nc_flag; /* flag bits */ 120 u_char nc_nlen; /* length of name */ 121 struct timespec nc_time; /* timespec provided by fs */ 122 struct timespec nc_dotdottime; /* dotdot timespec provided by fs */ 123 int nc_ticks; /* ticks value when entry was added */ 124 char nc_name[0]; /* segment name + nul */ 125 }; 126 127 /* 128 * Flags in namecache.nc_flag 129 */ 130 #define NCF_WHITE 0x01 131 #define NCF_ISDOTDOT 0x02 132 #define NCF_TS 0x04 133 #define NCF_DTS 0x08 134 135 /* 136 * Name caching works as follows: 137 * 138 * Names found by directory scans are retained in a cache 139 * for future reference. It is managed LRU, so frequently 140 * used names will hang around. Cache is indexed by hash value 141 * obtained from (vp, name) where vp refers to the directory 142 * containing name. 143 * 144 * If it is a "negative" entry, (i.e. for a name that is known NOT to 145 * exist) the vnode pointer will be NULL. 146 * 147 * Upon reaching the last segment of a path, if the reference 148 * is for DELETE, or NOCACHE is set (rewrite), and the 149 * name is located in the cache, it will be dropped. 150 */ 151 152 /* 153 * Structures associated with name cacheing. 154 */ 155 #define NCHHASH(hash) \ 156 (&nchashtbl[(hash) & nchash]) 157 static LIST_HEAD(nchashhead, namecache) *nchashtbl; /* Hash Table */ 158 static TAILQ_HEAD(, namecache) ncneg; /* Hash Table */ 159 static u_long nchash; /* size of hash table */ 160 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, 161 "Size of namecache hash table"); 162 static u_long ncnegfactor = 16; /* ratio of negative entries */ 163 SYSCTL_ULONG(_vfs, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, 164 "Ratio of negative namecache entries"); 165 static u_long numneg; /* number of negative entries allocated */ 166 SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, 167 "Number of negative entries in namecache"); 168 static u_long numcache; /* number of cache entries allocated */ 169 SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, 170 "Number of namecache entries"); 171 static u_long numcachehv; /* number of cache entries with vnodes held */ 172 SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0, 173 "Number of namecache entries with vnodes held"); 174 static u_int ncsizefactor = 2; 175 SYSCTL_UINT(_vfs, OID_AUTO, ncsizefactor, CTLFLAG_RW, &ncsizefactor, 0, 176 "Size factor for namecache"); 177 178 struct nchstats nchstats; /* cache effectiveness statistics */ 179 180 static struct rwlock cache_lock; 181 RW_SYSINIT(vfscache, &cache_lock, "Name Cache"); 182 183 #define CACHE_UPGRADE_LOCK() rw_try_upgrade(&cache_lock) 184 #define CACHE_RLOCK() rw_rlock(&cache_lock) 185 #define CACHE_RUNLOCK() rw_runlock(&cache_lock) 186 #define CACHE_WLOCK() rw_wlock(&cache_lock) 187 #define CACHE_WUNLOCK() rw_wunlock(&cache_lock) 188 189 /* 190 * UMA zones for the VFS cache. 191 * 192 * The small cache is used for entries with short names, which are the 193 * most common. The large cache is used for entries which are too big to 194 * fit in the small cache. 195 */ 196 static uma_zone_t cache_zone_small; 197 static uma_zone_t cache_zone_small_ts; 198 static uma_zone_t cache_zone_large; 199 static uma_zone_t cache_zone_large_ts; 200 201 #define CACHE_PATH_CUTOFF 35 202 203 static struct namecache * 204 cache_alloc(int len, int ts) 205 { 206 207 if (len > CACHE_PATH_CUTOFF) { 208 if (ts) 209 return (uma_zalloc(cache_zone_large_ts, M_WAITOK)); 210 else 211 return (uma_zalloc(cache_zone_large, M_WAITOK)); 212 } 213 if (ts) 214 return (uma_zalloc(cache_zone_small_ts, M_WAITOK)); 215 else 216 return (uma_zalloc(cache_zone_small, M_WAITOK)); 217 } 218 219 static void 220 cache_free(struct namecache *ncp) 221 { 222 int ts; 223 224 if (ncp == NULL) 225 return; 226 ts = ncp->nc_flag & NCF_TS; 227 if (ncp->nc_nlen <= CACHE_PATH_CUTOFF) { 228 if (ts) 229 uma_zfree(cache_zone_small_ts, ncp); 230 else 231 uma_zfree(cache_zone_small, ncp); 232 } else if (ts) 233 uma_zfree(cache_zone_large_ts, ncp); 234 else 235 uma_zfree(cache_zone_large, ncp); 236 } 237 238 static char * 239 nc_get_name(struct namecache *ncp) 240 { 241 struct namecache_ts *ncp_ts; 242 243 if ((ncp->nc_flag & NCF_TS) == 0) 244 return (ncp->nc_name); 245 ncp_ts = (struct namecache_ts *)ncp; 246 return (ncp_ts->nc_name); 247 } 248 249 static void 250 cache_out_ts(struct namecache *ncp, struct timespec *tsp, int *ticksp) 251 { 252 253 KASSERT((ncp->nc_flag & NCF_TS) != 0 || 254 (tsp == NULL && ticksp == NULL), 255 ("No NCF_TS")); 256 257 if (tsp != NULL) 258 *tsp = ((struct namecache_ts *)ncp)->nc_time; 259 if (ticksp != NULL) 260 *ticksp = ((struct namecache_ts *)ncp)->nc_ticks; 261 } 262 263 static int doingcache = 1; /* 1 => enable the cache */ 264 SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, 265 "VFS namecache enabled"); 266 267 /* Export size information to userland */ 268 SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, 269 sizeof(struct namecache), "sizeof(struct namecache)"); 270 271 /* 272 * The new name cache statistics 273 */ 274 static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, 275 "Name cache statistics"); 276 #define STATNODE_ULONG(name, descr) \ 277 SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr); 278 #define STATNODE_COUNTER(name, descr) \ 279 static counter_u64_t name; \ 280 SYSCTL_COUNTER_U64(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, descr); 281 STATNODE_ULONG(numneg, "Number of negative cache entries"); 282 STATNODE_ULONG(numcache, "Number of cache entries"); 283 STATNODE_COUNTER(numcalls, "Number of cache lookups"); 284 STATNODE_COUNTER(dothits, "Number of '.' hits"); 285 STATNODE_COUNTER(dotdothits, "Number of '..' hits"); 286 STATNODE_COUNTER(numchecks, "Number of checks in lookup"); 287 STATNODE_COUNTER(nummiss, "Number of cache misses"); 288 STATNODE_COUNTER(nummisszap, "Number of cache misses we do not want to cache"); 289 STATNODE_COUNTER(numposzaps, 290 "Number of cache hits (positive) we do not want to cache"); 291 STATNODE_COUNTER(numposhits, "Number of cache hits (positive)"); 292 STATNODE_COUNTER(numnegzaps, 293 "Number of cache hits (negative) we do not want to cache"); 294 STATNODE_COUNTER(numneghits, "Number of cache hits (negative)"); 295 /* These count for kern___getcwd(), too. */ 296 STATNODE_COUNTER(numfullpathcalls, "Number of fullpath search calls"); 297 STATNODE_COUNTER(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)"); 298 STATNODE_COUNTER(numfullpathfail2, 299 "Number of fullpath search errors (VOP_VPTOCNP failures)"); 300 STATNODE_COUNTER(numfullpathfail4, "Number of fullpath search errors (ENOMEM)"); 301 STATNODE_COUNTER(numfullpathfound, "Number of successful fullpath calls"); 302 static long numupgrades; STATNODE_ULONG(numupgrades, 303 "Number of updates of the cache after lookup (write lock + retry)"); 304 305 static void cache_zap(struct namecache *ncp); 306 static int vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf, 307 u_int *buflen); 308 static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir, 309 char *buf, char **retbuf, u_int buflen); 310 311 static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries"); 312 313 static uint32_t 314 cache_get_hash(char *name, u_char len, struct vnode *dvp) 315 { 316 uint32_t hash; 317 318 hash = fnv_32_buf(name, len, FNV1_32_INIT); 319 hash = fnv_32_buf(&dvp, sizeof(dvp), hash); 320 return (hash); 321 } 322 323 static int 324 sysctl_nchstats(SYSCTL_HANDLER_ARGS) 325 { 326 struct nchstats snap; 327 328 if (req->oldptr == NULL) 329 return (SYSCTL_OUT(req, 0, sizeof(snap))); 330 331 snap = nchstats; 332 snap.ncs_goodhits = counter_u64_fetch(numposhits); 333 snap.ncs_neghits = counter_u64_fetch(numneghits); 334 snap.ncs_badhits = counter_u64_fetch(numposzaps) + 335 counter_u64_fetch(numnegzaps); 336 snap.ncs_miss = counter_u64_fetch(nummisszap) + 337 counter_u64_fetch(nummiss); 338 339 return (SYSCTL_OUT(req, &snap, sizeof(snap))); 340 } 341 SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE | CTLFLAG_RD | 342 CTLFLAG_MPSAFE, 0, 0, sysctl_nchstats, "LU", 343 "VFS cache effectiveness statistics"); 344 345 #ifdef DIAGNOSTIC 346 /* 347 * Grab an atomic snapshot of the name cache hash chain lengths 348 */ 349 static SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, 350 "hash table stats"); 351 352 static int 353 sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS) 354 { 355 struct nchashhead *ncpp; 356 struct namecache *ncp; 357 int i, error, n_nchash, *cntbuf; 358 359 retry: 360 n_nchash = nchash + 1; /* nchash is max index, not count */ 361 if (req->oldptr == NULL) 362 return SYSCTL_OUT(req, 0, n_nchash * sizeof(int)); 363 cntbuf = malloc(n_nchash * sizeof(int), M_TEMP, M_ZERO | M_WAITOK); 364 CACHE_RLOCK(); 365 if (n_nchash != nchash + 1) { 366 CACHE_RUNLOCK(); 367 free(cntbuf, M_TEMP); 368 goto retry; 369 } 370 /* Scan hash tables counting entries */ 371 for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++) 372 LIST_FOREACH(ncp, ncpp, nc_hash) 373 cntbuf[i]++; 374 CACHE_RUNLOCK(); 375 for (error = 0, i = 0; i < n_nchash; i++) 376 if ((error = SYSCTL_OUT(req, &cntbuf[i], sizeof(int))) != 0) 377 break; 378 free(cntbuf, M_TEMP); 379 return (error); 380 } 381 SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD| 382 CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int", 383 "nchash chain lengths"); 384 385 static int 386 sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS) 387 { 388 int error; 389 struct nchashhead *ncpp; 390 struct namecache *ncp; 391 int n_nchash; 392 int count, maxlength, used, pct; 393 394 if (!req->oldptr) 395 return SYSCTL_OUT(req, 0, 4 * sizeof(int)); 396 397 CACHE_RLOCK(); 398 n_nchash = nchash + 1; /* nchash is max index, not count */ 399 used = 0; 400 maxlength = 0; 401 402 /* Scan hash tables for applicable entries */ 403 for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) { 404 count = 0; 405 LIST_FOREACH(ncp, ncpp, nc_hash) { 406 count++; 407 } 408 if (count) 409 used++; 410 if (maxlength < count) 411 maxlength = count; 412 } 413 n_nchash = nchash + 1; 414 CACHE_RUNLOCK(); 415 pct = (used * 100) / (n_nchash / 100); 416 error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash)); 417 if (error) 418 return (error); 419 error = SYSCTL_OUT(req, &used, sizeof(used)); 420 if (error) 421 return (error); 422 error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength)); 423 if (error) 424 return (error); 425 error = SYSCTL_OUT(req, &pct, sizeof(pct)); 426 if (error) 427 return (error); 428 return (0); 429 } 430 SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD| 431 CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I", 432 "nchash statistics (number of total/used buckets, maximum chain length, usage percentage)"); 433 #endif 434 435 /* 436 * cache_zap(): 437 * 438 * Removes a namecache entry from cache, whether it contains an actual 439 * pointer to a vnode or if it is just a negative cache entry. 440 */ 441 static void 442 cache_zap(struct namecache *ncp) 443 { 444 struct vnode *vp; 445 446 rw_assert(&cache_lock, RA_WLOCKED); 447 CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp); 448 if (ncp->nc_vp != NULL) { 449 SDT_PROBE3(vfs, namecache, zap, done, ncp->nc_dvp, 450 nc_get_name(ncp), ncp->nc_vp); 451 } else { 452 SDT_PROBE2(vfs, namecache, zap_negative, done, ncp->nc_dvp, 453 nc_get_name(ncp)); 454 } 455 vp = NULL; 456 LIST_REMOVE(ncp, nc_hash); 457 if (ncp->nc_flag & NCF_ISDOTDOT) { 458 if (ncp == ncp->nc_dvp->v_cache_dd) 459 ncp->nc_dvp->v_cache_dd = NULL; 460 } else { 461 LIST_REMOVE(ncp, nc_src); 462 if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) { 463 vp = ncp->nc_dvp; 464 numcachehv--; 465 } 466 } 467 if (ncp->nc_vp) { 468 TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst); 469 if (ncp == ncp->nc_vp->v_cache_dd) 470 ncp->nc_vp->v_cache_dd = NULL; 471 } else { 472 TAILQ_REMOVE(&ncneg, ncp, nc_dst); 473 numneg--; 474 } 475 numcache--; 476 cache_free(ncp); 477 if (vp != NULL) 478 vdrop(vp); 479 } 480 481 /* 482 * Lookup an entry in the cache 483 * 484 * Lookup is called with dvp pointing to the directory to search, 485 * cnp pointing to the name of the entry being sought. If the lookup 486 * succeeds, the vnode is returned in *vpp, and a status of -1 is 487 * returned. If the lookup determines that the name does not exist 488 * (negative cacheing), a status of ENOENT is returned. If the lookup 489 * fails, a status of zero is returned. If the directory vnode is 490 * recycled out from under us due to a forced unmount, a status of 491 * ENOENT is returned. 492 * 493 * vpp is locked and ref'd on return. If we're looking up DOTDOT, dvp is 494 * unlocked. If we're looking up . an extra ref is taken, but the lock is 495 * not recursively acquired. 496 */ 497 498 int 499 cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, 500 struct timespec *tsp, int *ticksp) 501 { 502 struct namecache *ncp; 503 uint32_t hash; 504 int error, ltype, wlocked; 505 506 if (!doingcache) { 507 cnp->cn_flags &= ~MAKEENTRY; 508 return (0); 509 } 510 retry: 511 wlocked = 0; 512 counter_u64_add(numcalls, 1); 513 error = 0; 514 515 retry_wlocked: 516 if (cnp->cn_nameptr[0] == '.') { 517 if (cnp->cn_namelen == 1) { 518 *vpp = dvp; 519 CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .", 520 dvp, cnp->cn_nameptr); 521 counter_u64_add(dothits, 1); 522 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", *vpp); 523 if (tsp != NULL) 524 timespecclear(tsp); 525 if (ticksp != NULL) 526 *ticksp = ticks; 527 VREF(*vpp); 528 /* 529 * When we lookup "." we still can be asked to lock it 530 * differently... 531 */ 532 ltype = cnp->cn_lkflags & LK_TYPE_MASK; 533 if (ltype != VOP_ISLOCKED(*vpp)) { 534 if (ltype == LK_EXCLUSIVE) { 535 vn_lock(*vpp, LK_UPGRADE | LK_RETRY); 536 if ((*vpp)->v_iflag & VI_DOOMED) { 537 /* forced unmount */ 538 vrele(*vpp); 539 *vpp = NULL; 540 return (ENOENT); 541 } 542 } else 543 vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY); 544 } 545 return (-1); 546 } 547 if (!wlocked) 548 CACHE_RLOCK(); 549 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') { 550 counter_u64_add(dotdothits, 1); 551 if (dvp->v_cache_dd == NULL) { 552 SDT_PROBE3(vfs, namecache, lookup, miss, dvp, 553 "..", NULL); 554 goto unlock; 555 } 556 if ((cnp->cn_flags & MAKEENTRY) == 0) { 557 if (!wlocked && !CACHE_UPGRADE_LOCK()) 558 goto wlock; 559 if (dvp->v_cache_dd->nc_flag & NCF_ISDOTDOT) 560 cache_zap(dvp->v_cache_dd); 561 dvp->v_cache_dd = NULL; 562 CACHE_WUNLOCK(); 563 return (0); 564 } 565 ncp = dvp->v_cache_dd; 566 if (ncp->nc_flag & NCF_ISDOTDOT) 567 *vpp = ncp->nc_vp; 568 else 569 *vpp = ncp->nc_dvp; 570 /* Return failure if negative entry was found. */ 571 if (*vpp == NULL) 572 goto negative_success; 573 CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..", 574 dvp, cnp->cn_nameptr, *vpp); 575 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, "..", 576 *vpp); 577 cache_out_ts(ncp, tsp, ticksp); 578 if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) == 579 NCF_DTS && tsp != NULL) 580 *tsp = ((struct namecache_ts *)ncp)-> 581 nc_dotdottime; 582 goto success; 583 } 584 } else if (!wlocked) 585 CACHE_RLOCK(); 586 587 hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp); 588 LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { 589 counter_u64_add(numchecks, 1); 590 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen && 591 !bcmp(nc_get_name(ncp), cnp->cn_nameptr, ncp->nc_nlen)) 592 break; 593 } 594 595 /* We failed to find an entry */ 596 if (ncp == NULL) { 597 SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr, 598 NULL); 599 if ((cnp->cn_flags & MAKEENTRY) == 0) { 600 counter_u64_add(nummisszap, 1); 601 } else { 602 counter_u64_add(nummiss, 1); 603 } 604 goto unlock; 605 } 606 607 /* We don't want to have an entry, so dump it */ 608 if ((cnp->cn_flags & MAKEENTRY) == 0) { 609 counter_u64_add(numposzaps, 1); 610 if (!wlocked && !CACHE_UPGRADE_LOCK()) 611 goto wlock; 612 cache_zap(ncp); 613 CACHE_WUNLOCK(); 614 return (0); 615 } 616 617 /* We found a "positive" match, return the vnode */ 618 if (ncp->nc_vp) { 619 counter_u64_add(numposhits, 1); 620 *vpp = ncp->nc_vp; 621 CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p", 622 dvp, cnp->cn_nameptr, *vpp, ncp); 623 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, nc_get_name(ncp), 624 *vpp); 625 cache_out_ts(ncp, tsp, ticksp); 626 goto success; 627 } 628 629 negative_success: 630 /* We found a negative match, and want to create it, so purge */ 631 if (cnp->cn_nameiop == CREATE) { 632 counter_u64_add(numnegzaps, 1); 633 if (!wlocked && !CACHE_UPGRADE_LOCK()) 634 goto wlock; 635 cache_zap(ncp); 636 CACHE_WUNLOCK(); 637 return (0); 638 } 639 640 if (!wlocked && !CACHE_UPGRADE_LOCK()) 641 goto wlock; 642 counter_u64_add(numneghits, 1); 643 /* 644 * We found a "negative" match, so we shift it to the end of 645 * the "negative" cache entries queue to satisfy LRU. Also, 646 * check to see if the entry is a whiteout; indicate this to 647 * the componentname, if so. 648 */ 649 TAILQ_REMOVE(&ncneg, ncp, nc_dst); 650 TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst); 651 if (ncp->nc_flag & NCF_WHITE) 652 cnp->cn_flags |= ISWHITEOUT; 653 SDT_PROBE2(vfs, namecache, lookup, hit__negative, dvp, 654 nc_get_name(ncp)); 655 cache_out_ts(ncp, tsp, ticksp); 656 CACHE_WUNLOCK(); 657 return (ENOENT); 658 659 wlock: 660 /* 661 * We need to update the cache after our lookup, so upgrade to 662 * a write lock and retry the operation. 663 */ 664 CACHE_RUNLOCK(); 665 CACHE_WLOCK(); 666 numupgrades++; 667 wlocked = 1; 668 goto retry_wlocked; 669 670 success: 671 /* 672 * On success we return a locked and ref'd vnode as per the lookup 673 * protocol. 674 */ 675 MPASS(dvp != *vpp); 676 ltype = 0; /* silence gcc warning */ 677 if (cnp->cn_flags & ISDOTDOT) { 678 ltype = VOP_ISLOCKED(dvp); 679 VOP_UNLOCK(dvp, 0); 680 } 681 vhold(*vpp); 682 if (wlocked) 683 CACHE_WUNLOCK(); 684 else 685 CACHE_RUNLOCK(); 686 error = vget(*vpp, cnp->cn_lkflags | LK_VNHELD, cnp->cn_thread); 687 if (cnp->cn_flags & ISDOTDOT) { 688 vn_lock(dvp, ltype | LK_RETRY); 689 if (dvp->v_iflag & VI_DOOMED) { 690 if (error == 0) 691 vput(*vpp); 692 *vpp = NULL; 693 return (ENOENT); 694 } 695 } 696 if (error) { 697 *vpp = NULL; 698 goto retry; 699 } 700 if ((cnp->cn_flags & ISLASTCN) && 701 (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) { 702 ASSERT_VOP_ELOCKED(*vpp, "cache_lookup"); 703 } 704 return (-1); 705 706 unlock: 707 if (wlocked) 708 CACHE_WUNLOCK(); 709 else 710 CACHE_RUNLOCK(); 711 return (0); 712 } 713 714 /* 715 * Add an entry to the cache. 716 */ 717 void 718 cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, 719 struct timespec *tsp, struct timespec *dtsp) 720 { 721 struct namecache *ncp, *n2; 722 struct namecache_ts *n3; 723 struct nchashhead *ncpp; 724 uint32_t hash; 725 int flag; 726 int len; 727 728 CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr); 729 VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp, 730 ("cache_enter: Adding a doomed vnode")); 731 VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp, 732 ("cache_enter: Doomed vnode used as src")); 733 734 if (!doingcache) 735 return; 736 737 /* 738 * Avoid blowout in namecache entries. 739 */ 740 if (numcache >= desiredvnodes * ncsizefactor) 741 return; 742 743 flag = 0; 744 if (cnp->cn_nameptr[0] == '.') { 745 if (cnp->cn_namelen == 1) 746 return; 747 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') { 748 CACHE_WLOCK(); 749 /* 750 * If dotdot entry already exists, just retarget it 751 * to new parent vnode, otherwise continue with new 752 * namecache entry allocation. 753 */ 754 if ((ncp = dvp->v_cache_dd) != NULL && 755 ncp->nc_flag & NCF_ISDOTDOT) { 756 KASSERT(ncp->nc_dvp == dvp, 757 ("wrong isdotdot parent")); 758 if (ncp->nc_vp != NULL) { 759 TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, 760 ncp, nc_dst); 761 } else { 762 TAILQ_REMOVE(&ncneg, ncp, nc_dst); 763 numneg--; 764 } 765 if (vp != NULL) { 766 TAILQ_INSERT_HEAD(&vp->v_cache_dst, 767 ncp, nc_dst); 768 } else { 769 TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst); 770 numneg++; 771 } 772 ncp->nc_vp = vp; 773 CACHE_WUNLOCK(); 774 return; 775 } 776 dvp->v_cache_dd = NULL; 777 SDT_PROBE3(vfs, namecache, enter, done, dvp, "..", vp); 778 CACHE_WUNLOCK(); 779 flag = NCF_ISDOTDOT; 780 } 781 } 782 783 /* 784 * Calculate the hash key and setup as much of the new 785 * namecache entry as possible before acquiring the lock. 786 */ 787 ncp = cache_alloc(cnp->cn_namelen, tsp != NULL); 788 ncp->nc_vp = vp; 789 ncp->nc_dvp = dvp; 790 ncp->nc_flag = flag; 791 if (tsp != NULL) { 792 n3 = (struct namecache_ts *)ncp; 793 n3->nc_time = *tsp; 794 n3->nc_ticks = ticks; 795 n3->nc_flag |= NCF_TS; 796 if (dtsp != NULL) { 797 n3->nc_dotdottime = *dtsp; 798 n3->nc_flag |= NCF_DTS; 799 } 800 } 801 len = ncp->nc_nlen = cnp->cn_namelen; 802 hash = cache_get_hash(cnp->cn_nameptr, len, dvp); 803 strlcpy(nc_get_name(ncp), cnp->cn_nameptr, len + 1); 804 CACHE_WLOCK(); 805 806 /* 807 * See if this vnode or negative entry is already in the cache 808 * with this name. This can happen with concurrent lookups of 809 * the same path name. 810 */ 811 ncpp = NCHHASH(hash); 812 LIST_FOREACH(n2, ncpp, nc_hash) { 813 if (n2->nc_dvp == dvp && 814 n2->nc_nlen == cnp->cn_namelen && 815 !bcmp(nc_get_name(n2), cnp->cn_nameptr, n2->nc_nlen)) { 816 if (tsp != NULL) { 817 KASSERT((n2->nc_flag & NCF_TS) != 0, 818 ("no NCF_TS")); 819 n3 = (struct namecache_ts *)n2; 820 n3->nc_time = 821 ((struct namecache_ts *)ncp)->nc_time; 822 n3->nc_ticks = 823 ((struct namecache_ts *)ncp)->nc_ticks; 824 if (dtsp != NULL) { 825 n3->nc_dotdottime = 826 ((struct namecache_ts *)ncp)-> 827 nc_dotdottime; 828 n3->nc_flag |= NCF_DTS; 829 } 830 } 831 CACHE_WUNLOCK(); 832 cache_free(ncp); 833 return; 834 } 835 } 836 837 if (flag == NCF_ISDOTDOT) { 838 /* 839 * See if we are trying to add .. entry, but some other lookup 840 * has populated v_cache_dd pointer already. 841 */ 842 if (dvp->v_cache_dd != NULL) { 843 CACHE_WUNLOCK(); 844 cache_free(ncp); 845 return; 846 } 847 KASSERT(vp == NULL || vp->v_type == VDIR, 848 ("wrong vnode type %p", vp)); 849 dvp->v_cache_dd = ncp; 850 } 851 852 numcache++; 853 if (vp != NULL) { 854 if (vp->v_type == VDIR) { 855 if (flag != NCF_ISDOTDOT) { 856 /* 857 * For this case, the cache entry maps both the 858 * directory name in it and the name ".." for the 859 * directory's parent. 860 */ 861 if ((n2 = vp->v_cache_dd) != NULL && 862 (n2->nc_flag & NCF_ISDOTDOT) != 0) 863 cache_zap(n2); 864 vp->v_cache_dd = ncp; 865 } 866 } else { 867 vp->v_cache_dd = NULL; 868 } 869 } 870 871 /* 872 * Insert the new namecache entry into the appropriate chain 873 * within the cache entries table. 874 */ 875 LIST_INSERT_HEAD(ncpp, ncp, nc_hash); 876 if (flag != NCF_ISDOTDOT) { 877 if (LIST_EMPTY(&dvp->v_cache_src)) { 878 vhold(dvp); 879 numcachehv++; 880 } 881 LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src); 882 } 883 884 /* 885 * If the entry is "negative", we place it into the 886 * "negative" cache queue, otherwise, we place it into the 887 * destination vnode's cache entries queue. 888 */ 889 if (vp != NULL) { 890 TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst); 891 SDT_PROBE3(vfs, namecache, enter, done, dvp, nc_get_name(ncp), 892 vp); 893 } else { 894 if (cnp->cn_flags & ISWHITEOUT) 895 ncp->nc_flag |= NCF_WHITE; 896 TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst); 897 numneg++; 898 SDT_PROBE2(vfs, namecache, enter_negative, done, dvp, 899 nc_get_name(ncp)); 900 } 901 if (numneg * ncnegfactor > numcache) { 902 ncp = TAILQ_FIRST(&ncneg); 903 KASSERT(ncp->nc_vp == NULL, ("ncp %p vp %p on ncneg", 904 ncp, ncp->nc_vp)); 905 cache_zap(ncp); 906 } 907 CACHE_WUNLOCK(); 908 } 909 910 /* 911 * Name cache initialization, from vfs_init() when we are booting 912 */ 913 static void 914 nchinit(void *dummy __unused) 915 { 916 917 TAILQ_INIT(&ncneg); 918 919 cache_zone_small = uma_zcreate("S VFS Cache", 920 sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1, 921 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 922 cache_zone_small_ts = uma_zcreate("STS VFS Cache", 923 sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1, 924 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 925 cache_zone_large = uma_zcreate("L VFS Cache", 926 sizeof(struct namecache) + NAME_MAX + 1, 927 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 928 cache_zone_large_ts = uma_zcreate("LTS VFS Cache", 929 sizeof(struct namecache_ts) + NAME_MAX + 1, 930 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 931 932 nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash); 933 934 numcalls = counter_u64_alloc(M_WAITOK); 935 dothits = counter_u64_alloc(M_WAITOK); 936 dotdothits = counter_u64_alloc(M_WAITOK); 937 numchecks = counter_u64_alloc(M_WAITOK); 938 nummiss = counter_u64_alloc(M_WAITOK); 939 nummisszap = counter_u64_alloc(M_WAITOK); 940 numposzaps = counter_u64_alloc(M_WAITOK); 941 numposhits = counter_u64_alloc(M_WAITOK); 942 numnegzaps = counter_u64_alloc(M_WAITOK); 943 numneghits = counter_u64_alloc(M_WAITOK); 944 numfullpathcalls = counter_u64_alloc(M_WAITOK); 945 numfullpathfail1 = counter_u64_alloc(M_WAITOK); 946 numfullpathfail2 = counter_u64_alloc(M_WAITOK); 947 numfullpathfail4 = counter_u64_alloc(M_WAITOK); 948 numfullpathfound = counter_u64_alloc(M_WAITOK); 949 } 950 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL); 951 952 void 953 cache_changesize(int newmaxvnodes) 954 { 955 struct nchashhead *new_nchashtbl, *old_nchashtbl; 956 u_long new_nchash, old_nchash; 957 struct namecache *ncp; 958 uint32_t hash; 959 int i; 960 961 new_nchashtbl = hashinit(newmaxvnodes * 2, M_VFSCACHE, &new_nchash); 962 /* If same hash table size, nothing to do */ 963 if (nchash == new_nchash) { 964 free(new_nchashtbl, M_VFSCACHE); 965 return; 966 } 967 /* 968 * Move everything from the old hash table to the new table. 969 * None of the namecache entries in the table can be removed 970 * because to do so, they have to be removed from the hash table. 971 */ 972 CACHE_WLOCK(); 973 old_nchashtbl = nchashtbl; 974 old_nchash = nchash; 975 nchashtbl = new_nchashtbl; 976 nchash = new_nchash; 977 for (i = 0; i <= old_nchash; i++) { 978 while ((ncp = LIST_FIRST(&old_nchashtbl[i])) != NULL) { 979 hash = cache_get_hash(nc_get_name(ncp), ncp->nc_nlen, 980 ncp->nc_dvp); 981 LIST_REMOVE(ncp, nc_hash); 982 LIST_INSERT_HEAD(NCHHASH(hash), ncp, nc_hash); 983 } 984 } 985 CACHE_WUNLOCK(); 986 free(old_nchashtbl, M_VFSCACHE); 987 } 988 989 /* 990 * Invalidate all entries to a particular vnode. 991 */ 992 void 993 cache_purge(struct vnode *vp) 994 { 995 996 CTR1(KTR_VFS, "cache_purge(%p)", vp); 997 SDT_PROBE1(vfs, namecache, purge, done, vp); 998 CACHE_WLOCK(); 999 while (!LIST_EMPTY(&vp->v_cache_src)) 1000 cache_zap(LIST_FIRST(&vp->v_cache_src)); 1001 while (!TAILQ_EMPTY(&vp->v_cache_dst)) 1002 cache_zap(TAILQ_FIRST(&vp->v_cache_dst)); 1003 if (vp->v_cache_dd != NULL) { 1004 KASSERT(vp->v_cache_dd->nc_flag & NCF_ISDOTDOT, 1005 ("lost dotdot link")); 1006 cache_zap(vp->v_cache_dd); 1007 } 1008 KASSERT(vp->v_cache_dd == NULL, ("incomplete purge")); 1009 CACHE_WUNLOCK(); 1010 } 1011 1012 /* 1013 * Invalidate all negative entries for a particular directory vnode. 1014 */ 1015 void 1016 cache_purge_negative(struct vnode *vp) 1017 { 1018 struct namecache *cp, *ncp; 1019 1020 CTR1(KTR_VFS, "cache_purge_negative(%p)", vp); 1021 SDT_PROBE1(vfs, namecache, purge_negative, done, vp); 1022 CACHE_WLOCK(); 1023 LIST_FOREACH_SAFE(cp, &vp->v_cache_src, nc_src, ncp) { 1024 if (cp->nc_vp == NULL) 1025 cache_zap(cp); 1026 } 1027 CACHE_WUNLOCK(); 1028 } 1029 1030 /* 1031 * Flush all entries referencing a particular filesystem. 1032 */ 1033 void 1034 cache_purgevfs(struct mount *mp) 1035 { 1036 struct nchashhead *ncpp; 1037 struct namecache *ncp, *nnp; 1038 1039 /* Scan hash tables for applicable entries */ 1040 SDT_PROBE1(vfs, namecache, purgevfs, done, mp); 1041 CACHE_WLOCK(); 1042 for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) { 1043 LIST_FOREACH_SAFE(ncp, ncpp, nc_hash, nnp) { 1044 if (ncp->nc_dvp->v_mount == mp) 1045 cache_zap(ncp); 1046 } 1047 } 1048 CACHE_WUNLOCK(); 1049 } 1050 1051 /* 1052 * Perform canonical checks and cache lookup and pass on to filesystem 1053 * through the vop_cachedlookup only if needed. 1054 */ 1055 1056 int 1057 vfs_cache_lookup(struct vop_lookup_args *ap) 1058 { 1059 struct vnode *dvp; 1060 int error; 1061 struct vnode **vpp = ap->a_vpp; 1062 struct componentname *cnp = ap->a_cnp; 1063 struct ucred *cred = cnp->cn_cred; 1064 int flags = cnp->cn_flags; 1065 struct thread *td = cnp->cn_thread; 1066 1067 *vpp = NULL; 1068 dvp = ap->a_dvp; 1069 1070 if (dvp->v_type != VDIR) 1071 return (ENOTDIR); 1072 1073 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && 1074 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) 1075 return (EROFS); 1076 1077 error = VOP_ACCESS(dvp, VEXEC, cred, td); 1078 if (error) 1079 return (error); 1080 1081 error = cache_lookup(dvp, vpp, cnp, NULL, NULL); 1082 if (error == 0) 1083 return (VOP_CACHEDLOOKUP(dvp, vpp, cnp)); 1084 if (error == -1) 1085 return (0); 1086 return (error); 1087 } 1088 1089 /* 1090 * XXX All of these sysctls would probably be more productive dead. 1091 */ 1092 static int disablecwd; 1093 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, 1094 "Disable the getcwd syscall"); 1095 1096 /* Implementation of the getcwd syscall. */ 1097 int 1098 sys___getcwd(struct thread *td, struct __getcwd_args *uap) 1099 { 1100 1101 return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen, 1102 MAXPATHLEN)); 1103 } 1104 1105 int 1106 kern___getcwd(struct thread *td, char *buf, enum uio_seg bufseg, u_int buflen, 1107 u_int path_max) 1108 { 1109 char *bp, *tmpbuf; 1110 struct filedesc *fdp; 1111 struct vnode *cdir, *rdir; 1112 int error; 1113 1114 if (disablecwd) 1115 return (ENODEV); 1116 if (buflen < 2) 1117 return (EINVAL); 1118 if (buflen > path_max) 1119 buflen = path_max; 1120 1121 tmpbuf = malloc(buflen, M_TEMP, M_WAITOK); 1122 fdp = td->td_proc->p_fd; 1123 FILEDESC_SLOCK(fdp); 1124 cdir = fdp->fd_cdir; 1125 VREF(cdir); 1126 rdir = fdp->fd_rdir; 1127 VREF(rdir); 1128 FILEDESC_SUNLOCK(fdp); 1129 error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen); 1130 vrele(rdir); 1131 vrele(cdir); 1132 1133 if (!error) { 1134 if (bufseg == UIO_SYSSPACE) 1135 bcopy(bp, buf, strlen(bp) + 1); 1136 else 1137 error = copyout(bp, buf, strlen(bp) + 1); 1138 #ifdef KTRACE 1139 if (KTRPOINT(curthread, KTR_NAMEI)) 1140 ktrnamei(bp); 1141 #endif 1142 } 1143 free(tmpbuf, M_TEMP); 1144 return (error); 1145 } 1146 1147 /* 1148 * Thus begins the fullpath magic. 1149 */ 1150 1151 static int disablefullpath; 1152 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0, 1153 "Disable the vn_fullpath function"); 1154 1155 /* 1156 * Retrieve the full filesystem path that correspond to a vnode from the name 1157 * cache (if available) 1158 */ 1159 int 1160 vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf) 1161 { 1162 char *buf; 1163 struct filedesc *fdp; 1164 struct vnode *rdir; 1165 int error; 1166 1167 if (disablefullpath) 1168 return (ENODEV); 1169 if (vn == NULL) 1170 return (EINVAL); 1171 1172 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); 1173 fdp = td->td_proc->p_fd; 1174 FILEDESC_SLOCK(fdp); 1175 rdir = fdp->fd_rdir; 1176 VREF(rdir); 1177 FILEDESC_SUNLOCK(fdp); 1178 error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN); 1179 vrele(rdir); 1180 1181 if (!error) 1182 *freebuf = buf; 1183 else 1184 free(buf, M_TEMP); 1185 return (error); 1186 } 1187 1188 /* 1189 * This function is similar to vn_fullpath, but it attempts to lookup the 1190 * pathname relative to the global root mount point. This is required for the 1191 * auditing sub-system, as audited pathnames must be absolute, relative to the 1192 * global root mount point. 1193 */ 1194 int 1195 vn_fullpath_global(struct thread *td, struct vnode *vn, 1196 char **retbuf, char **freebuf) 1197 { 1198 char *buf; 1199 int error; 1200 1201 if (disablefullpath) 1202 return (ENODEV); 1203 if (vn == NULL) 1204 return (EINVAL); 1205 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); 1206 error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN); 1207 if (!error) 1208 *freebuf = buf; 1209 else 1210 free(buf, M_TEMP); 1211 return (error); 1212 } 1213 1214 int 1215 vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen) 1216 { 1217 int error; 1218 1219 CACHE_RLOCK(); 1220 error = vn_vptocnp_locked(vp, cred, buf, buflen); 1221 if (error == 0) 1222 CACHE_RUNLOCK(); 1223 return (error); 1224 } 1225 1226 static int 1227 vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf, 1228 u_int *buflen) 1229 { 1230 struct vnode *dvp; 1231 struct namecache *ncp; 1232 int error; 1233 1234 TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) { 1235 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 1236 break; 1237 } 1238 if (ncp != NULL) { 1239 if (*buflen < ncp->nc_nlen) { 1240 CACHE_RUNLOCK(); 1241 vrele(*vp); 1242 counter_u64_add(numfullpathfail4, 1); 1243 error = ENOMEM; 1244 SDT_PROBE3(vfs, namecache, fullpath, return, error, 1245 vp, NULL); 1246 return (error); 1247 } 1248 *buflen -= ncp->nc_nlen; 1249 memcpy(buf + *buflen, nc_get_name(ncp), ncp->nc_nlen); 1250 SDT_PROBE3(vfs, namecache, fullpath, hit, ncp->nc_dvp, 1251 nc_get_name(ncp), vp); 1252 dvp = *vp; 1253 *vp = ncp->nc_dvp; 1254 vref(*vp); 1255 CACHE_RUNLOCK(); 1256 vrele(dvp); 1257 CACHE_RLOCK(); 1258 return (0); 1259 } 1260 SDT_PROBE1(vfs, namecache, fullpath, miss, vp); 1261 1262 CACHE_RUNLOCK(); 1263 vn_lock(*vp, LK_SHARED | LK_RETRY); 1264 error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen); 1265 vput(*vp); 1266 if (error) { 1267 counter_u64_add(numfullpathfail2, 1); 1268 SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL); 1269 return (error); 1270 } 1271 1272 *vp = dvp; 1273 CACHE_RLOCK(); 1274 if (dvp->v_iflag & VI_DOOMED) { 1275 /* forced unmount */ 1276 CACHE_RUNLOCK(); 1277 vrele(dvp); 1278 error = ENOENT; 1279 SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL); 1280 return (error); 1281 } 1282 /* 1283 * *vp has its use count incremented still. 1284 */ 1285 1286 return (0); 1287 } 1288 1289 /* 1290 * The magic behind kern___getcwd() and vn_fullpath(). 1291 */ 1292 static int 1293 vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir, 1294 char *buf, char **retbuf, u_int buflen) 1295 { 1296 int error, slash_prefixed; 1297 #ifdef KDTRACE_HOOKS 1298 struct vnode *startvp = vp; 1299 #endif 1300 struct vnode *vp1; 1301 1302 buflen--; 1303 buf[buflen] = '\0'; 1304 error = 0; 1305 slash_prefixed = 0; 1306 1307 SDT_PROBE1(vfs, namecache, fullpath, entry, vp); 1308 counter_u64_add(numfullpathcalls, 1); 1309 vref(vp); 1310 CACHE_RLOCK(); 1311 if (vp->v_type != VDIR) { 1312 error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen); 1313 if (error) 1314 return (error); 1315 if (buflen == 0) { 1316 CACHE_RUNLOCK(); 1317 vrele(vp); 1318 return (ENOMEM); 1319 } 1320 buf[--buflen] = '/'; 1321 slash_prefixed = 1; 1322 } 1323 while (vp != rdir && vp != rootvnode) { 1324 if (vp->v_vflag & VV_ROOT) { 1325 if (vp->v_iflag & VI_DOOMED) { /* forced unmount */ 1326 CACHE_RUNLOCK(); 1327 vrele(vp); 1328 error = ENOENT; 1329 SDT_PROBE3(vfs, namecache, fullpath, return, 1330 error, vp, NULL); 1331 break; 1332 } 1333 vp1 = vp->v_mount->mnt_vnodecovered; 1334 vref(vp1); 1335 CACHE_RUNLOCK(); 1336 vrele(vp); 1337 vp = vp1; 1338 CACHE_RLOCK(); 1339 continue; 1340 } 1341 if (vp->v_type != VDIR) { 1342 CACHE_RUNLOCK(); 1343 vrele(vp); 1344 counter_u64_add(numfullpathfail1, 1); 1345 error = ENOTDIR; 1346 SDT_PROBE3(vfs, namecache, fullpath, return, 1347 error, vp, NULL); 1348 break; 1349 } 1350 error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen); 1351 if (error) 1352 break; 1353 if (buflen == 0) { 1354 CACHE_RUNLOCK(); 1355 vrele(vp); 1356 error = ENOMEM; 1357 SDT_PROBE3(vfs, namecache, fullpath, return, error, 1358 startvp, NULL); 1359 break; 1360 } 1361 buf[--buflen] = '/'; 1362 slash_prefixed = 1; 1363 } 1364 if (error) 1365 return (error); 1366 if (!slash_prefixed) { 1367 if (buflen == 0) { 1368 CACHE_RUNLOCK(); 1369 vrele(vp); 1370 counter_u64_add(numfullpathfail4, 1); 1371 SDT_PROBE3(vfs, namecache, fullpath, return, ENOMEM, 1372 startvp, NULL); 1373 return (ENOMEM); 1374 } 1375 buf[--buflen] = '/'; 1376 } 1377 counter_u64_add(numfullpathfound, 1); 1378 CACHE_RUNLOCK(); 1379 vrele(vp); 1380 1381 SDT_PROBE3(vfs, namecache, fullpath, return, 0, startvp, buf + buflen); 1382 *retbuf = buf + buflen; 1383 return (0); 1384 } 1385 1386 struct vnode * 1387 vn_dir_dd_ino(struct vnode *vp) 1388 { 1389 struct namecache *ncp; 1390 struct vnode *ddvp; 1391 1392 ASSERT_VOP_LOCKED(vp, "vn_dir_dd_ino"); 1393 CACHE_RLOCK(); 1394 TAILQ_FOREACH(ncp, &(vp->v_cache_dst), nc_dst) { 1395 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) 1396 continue; 1397 ddvp = ncp->nc_dvp; 1398 vhold(ddvp); 1399 CACHE_RUNLOCK(); 1400 if (vget(ddvp, LK_SHARED | LK_NOWAIT | LK_VNHELD, curthread)) 1401 return (NULL); 1402 return (ddvp); 1403 } 1404 CACHE_RUNLOCK(); 1405 return (NULL); 1406 } 1407 1408 int 1409 vn_commname(struct vnode *vp, char *buf, u_int buflen) 1410 { 1411 struct namecache *ncp; 1412 int l; 1413 1414 CACHE_RLOCK(); 1415 TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst) 1416 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 1417 break; 1418 if (ncp == NULL) { 1419 CACHE_RUNLOCK(); 1420 return (ENOENT); 1421 } 1422 l = min(ncp->nc_nlen, buflen - 1); 1423 memcpy(buf, nc_get_name(ncp), l); 1424 CACHE_RUNLOCK(); 1425 buf[l] = '\0'; 1426 return (0); 1427 } 1428 1429 /* ABI compat shims for old kernel modules. */ 1430 #undef cache_enter 1431 1432 void cache_enter(struct vnode *dvp, struct vnode *vp, 1433 struct componentname *cnp); 1434 1435 void 1436 cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) 1437 { 1438 1439 cache_enter_time(dvp, vp, cnp, NULL, NULL); 1440 } 1441 1442 /* 1443 * This function updates path string to vnode's full global path 1444 * and checks the size of the new path string against the pathlen argument. 1445 * 1446 * Requires a locked, referenced vnode. 1447 * Vnode is re-locked on success or ENODEV, otherwise unlocked. 1448 * 1449 * If sysctl debug.disablefullpath is set, ENODEV is returned, 1450 * vnode is left locked and path remain untouched. 1451 * 1452 * If vp is a directory, the call to vn_fullpath_global() always succeeds 1453 * because it falls back to the ".." lookup if the namecache lookup fails. 1454 */ 1455 int 1456 vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path, 1457 u_int pathlen) 1458 { 1459 struct nameidata nd; 1460 struct vnode *vp1; 1461 char *rpath, *fbuf; 1462 int error; 1463 1464 ASSERT_VOP_ELOCKED(vp, __func__); 1465 1466 /* Return ENODEV if sysctl debug.disablefullpath==1 */ 1467 if (disablefullpath) 1468 return (ENODEV); 1469 1470 /* Construct global filesystem path from vp. */ 1471 VOP_UNLOCK(vp, 0); 1472 error = vn_fullpath_global(td, vp, &rpath, &fbuf); 1473 1474 if (error != 0) { 1475 vrele(vp); 1476 return (error); 1477 } 1478 1479 if (strlen(rpath) >= pathlen) { 1480 vrele(vp); 1481 error = ENAMETOOLONG; 1482 goto out; 1483 } 1484 1485 /* 1486 * Re-lookup the vnode by path to detect a possible rename. 1487 * As a side effect, the vnode is relocked. 1488 * If vnode was renamed, return ENOENT. 1489 */ 1490 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1, 1491 UIO_SYSSPACE, path, td); 1492 error = namei(&nd); 1493 if (error != 0) { 1494 vrele(vp); 1495 goto out; 1496 } 1497 NDFREE(&nd, NDF_ONLY_PNBUF); 1498 vp1 = nd.ni_vp; 1499 vrele(vp); 1500 if (vp1 == vp) 1501 strcpy(path, rpath); 1502 else { 1503 vput(vp1); 1504 error = ENOENT; 1505 } 1506 1507 out: 1508 free(fbuf, M_TEMP); 1509 return (error); 1510 } 1511