1 /*- 2 * Copyright (c) 1989, 1993, 1995 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Poul-Henning Kamp of the FreeBSD Project. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 4. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95 33 */ 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include "opt_ktrace.h" 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/filedesc.h> 43 #include <sys/fnv_hash.h> 44 #include <sys/kernel.h> 45 #include <sys/lock.h> 46 #include <sys/malloc.h> 47 #include <sys/fcntl.h> 48 #include <sys/mount.h> 49 #include <sys/namei.h> 50 #include <sys/proc.h> 51 #include <sys/rwlock.h> 52 #include <sys/sdt.h> 53 #include <sys/syscallsubr.h> 54 #include <sys/sysctl.h> 55 #include <sys/sysproto.h> 56 #include <sys/vnode.h> 57 #ifdef KTRACE 58 #include <sys/ktrace.h> 59 #endif 60 61 #include <vm/uma.h> 62 63 SDT_PROVIDER_DECLARE(vfs); 64 SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *", 65 "struct vnode *"); 66 SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *", 67 "char *"); 68 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *"); 69 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *", 70 "char *", "struct vnode *"); 71 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *"); 72 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int", 73 "struct vnode *", "char *"); 74 SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *", 75 "struct vnode *"); 76 SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit__negative, 77 "struct vnode *", "char *"); 78 SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *", 79 "char *"); 80 SDT_PROBE_DEFINE1(vfs, namecache, purge, done, "struct vnode *"); 81 SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *"); 82 SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *"); 83 SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *", 84 "struct vnode *"); 85 SDT_PROBE_DEFINE2(vfs, namecache, zap_negative, done, "struct vnode *", 86 "char *"); 87 88 /* 89 * This structure describes the elements in the cache of recent 90 * names looked up by namei. 91 */ 92 93 struct namecache { 94 LIST_ENTRY(namecache) nc_hash; /* hash chain */ 95 LIST_ENTRY(namecache) nc_src; /* source vnode list */ 96 TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */ 97 struct vnode *nc_dvp; /* vnode of parent of name */ 98 struct vnode *nc_vp; /* vnode the name refers to */ 99 u_char nc_flag; /* flag bits */ 100 u_char nc_nlen; /* length of name */ 101 char nc_name[0]; /* segment name + nul */ 102 }; 103 104 /* 105 * struct namecache_ts repeats struct namecache layout up to the 106 * nc_nlen member. 107 * struct namecache_ts is used in place of struct namecache when time(s) need 108 * to be stored. The nc_dotdottime field is used when a cache entry is mapping 109 * both a non-dotdot directory name plus dotdot for the directory's 110 * parent. 111 */ 112 struct namecache_ts { 113 LIST_ENTRY(namecache) nc_hash; /* hash chain */ 114 LIST_ENTRY(namecache) nc_src; /* source vnode list */ 115 TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */ 116 struct vnode *nc_dvp; /* vnode of parent of name */ 117 struct vnode *nc_vp; /* vnode the name refers to */ 118 u_char nc_flag; /* flag bits */ 119 u_char nc_nlen; /* length of name */ 120 struct timespec nc_time; /* timespec provided by fs */ 121 struct timespec nc_dotdottime; /* dotdot timespec provided by fs */ 122 int nc_ticks; /* ticks value when entry was added */ 123 char nc_name[0]; /* segment name + nul */ 124 }; 125 126 /* 127 * Flags in namecache.nc_flag 128 */ 129 #define NCF_WHITE 0x01 130 #define NCF_ISDOTDOT 0x02 131 #define NCF_TS 0x04 132 #define NCF_DTS 0x08 133 134 /* 135 * Name caching works as follows: 136 * 137 * Names found by directory scans are retained in a cache 138 * for future reference. It is managed LRU, so frequently 139 * used names will hang around. Cache is indexed by hash value 140 * obtained from (vp, name) where vp refers to the directory 141 * containing name. 142 * 143 * If it is a "negative" entry, (i.e. for a name that is known NOT to 144 * exist) the vnode pointer will be NULL. 145 * 146 * Upon reaching the last segment of a path, if the reference 147 * is for DELETE, or NOCACHE is set (rewrite), and the 148 * name is located in the cache, it will be dropped. 149 */ 150 151 /* 152 * Structures associated with name cacheing. 153 */ 154 #define NCHHASH(hash) \ 155 (&nchashtbl[(hash) & nchash]) 156 static LIST_HEAD(nchashhead, namecache) *nchashtbl; /* Hash Table */ 157 static TAILQ_HEAD(, namecache) ncneg; /* Hash Table */ 158 static u_long nchash; /* size of hash table */ 159 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, 160 "Size of namecache hash table"); 161 static u_long ncnegfactor = 16; /* ratio of negative entries */ 162 SYSCTL_ULONG(_vfs, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, 163 "Ratio of negative namecache entries"); 164 static u_long numneg; /* number of negative entries allocated */ 165 SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, 166 "Number of negative entries in namecache"); 167 static u_long numcache; /* number of cache entries allocated */ 168 SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, 169 "Number of namecache entries"); 170 static u_long numcachehv; /* number of cache entries with vnodes held */ 171 SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0, 172 "Number of namecache entries with vnodes held"); 173 static u_int ncsizefactor = 2; 174 SYSCTL_UINT(_vfs, OID_AUTO, ncsizefactor, CTLFLAG_RW, &ncsizefactor, 0, 175 "Size factor for namecache"); 176 177 struct nchstats nchstats; /* cache effectiveness statistics */ 178 179 static struct rwlock cache_lock; 180 RW_SYSINIT(vfscache, &cache_lock, "Name Cache"); 181 182 #define CACHE_UPGRADE_LOCK() rw_try_upgrade(&cache_lock) 183 #define CACHE_RLOCK() rw_rlock(&cache_lock) 184 #define CACHE_RUNLOCK() rw_runlock(&cache_lock) 185 #define CACHE_WLOCK() rw_wlock(&cache_lock) 186 #define CACHE_WUNLOCK() rw_wunlock(&cache_lock) 187 188 /* 189 * UMA zones for the VFS cache. 190 * 191 * The small cache is used for entries with short names, which are the 192 * most common. The large cache is used for entries which are too big to 193 * fit in the small cache. 194 */ 195 static uma_zone_t cache_zone_small; 196 static uma_zone_t cache_zone_small_ts; 197 static uma_zone_t cache_zone_large; 198 static uma_zone_t cache_zone_large_ts; 199 200 #define CACHE_PATH_CUTOFF 35 201 202 static struct namecache * 203 cache_alloc(int len, int ts) 204 { 205 206 if (len > CACHE_PATH_CUTOFF) { 207 if (ts) 208 return (uma_zalloc(cache_zone_large_ts, M_WAITOK)); 209 else 210 return (uma_zalloc(cache_zone_large, M_WAITOK)); 211 } 212 if (ts) 213 return (uma_zalloc(cache_zone_small_ts, M_WAITOK)); 214 else 215 return (uma_zalloc(cache_zone_small, M_WAITOK)); 216 } 217 218 static void 219 cache_free(struct namecache *ncp) 220 { 221 int ts; 222 223 if (ncp == NULL) 224 return; 225 ts = ncp->nc_flag & NCF_TS; 226 if (ncp->nc_nlen <= CACHE_PATH_CUTOFF) { 227 if (ts) 228 uma_zfree(cache_zone_small_ts, ncp); 229 else 230 uma_zfree(cache_zone_small, ncp); 231 } else if (ts) 232 uma_zfree(cache_zone_large_ts, ncp); 233 else 234 uma_zfree(cache_zone_large, ncp); 235 } 236 237 static char * 238 nc_get_name(struct namecache *ncp) 239 { 240 struct namecache_ts *ncp_ts; 241 242 if ((ncp->nc_flag & NCF_TS) == 0) 243 return (ncp->nc_name); 244 ncp_ts = (struct namecache_ts *)ncp; 245 return (ncp_ts->nc_name); 246 } 247 248 static void 249 cache_out_ts(struct namecache *ncp, struct timespec *tsp, int *ticksp) 250 { 251 252 KASSERT((ncp->nc_flag & NCF_TS) != 0 || 253 (tsp == NULL && ticksp == NULL), 254 ("No NCF_TS")); 255 256 if (tsp != NULL) 257 *tsp = ((struct namecache_ts *)ncp)->nc_time; 258 if (ticksp != NULL) 259 *ticksp = ((struct namecache_ts *)ncp)->nc_ticks; 260 } 261 262 static int doingcache = 1; /* 1 => enable the cache */ 263 SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, 264 "VFS namecache enabled"); 265 266 /* Export size information to userland */ 267 SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, 268 sizeof(struct namecache), "sizeof(struct namecache)"); 269 270 /* 271 * The new name cache statistics 272 */ 273 static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, 274 "Name cache statistics"); 275 #define STATNODE(mode, name, var, descr) \ 276 SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, descr); 277 STATNODE(CTLFLAG_RD, numneg, &numneg, "Number of negative cache entries"); 278 STATNODE(CTLFLAG_RD, numcache, &numcache, "Number of cache entries"); 279 static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls, 280 "Number of cache lookups"); 281 static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits, 282 "Number of '.' hits"); 283 static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits, 284 "Number of '..' hits"); 285 static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks, 286 "Number of checks in lookup"); 287 static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss, 288 "Number of cache misses"); 289 static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap, 290 "Number of cache misses we do not want to cache"); 291 static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps, 292 "Number of cache hits (positive) we do not want to cache"); 293 static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits, 294 "Number of cache hits (positive)"); 295 static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps, 296 "Number of cache hits (negative) we do not want to cache"); 297 static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits, 298 "Number of cache hits (negative)"); 299 static u_long numupgrades; STATNODE(CTLFLAG_RD, numupgrades, &numupgrades, 300 "Number of updates of the cache after lookup (write lock + retry)"); 301 302 SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD | CTLFLAG_MPSAFE, 303 &nchstats, sizeof(nchstats), "LU", 304 "VFS cache effectiveness statistics"); 305 306 307 308 static void cache_zap(struct namecache *ncp); 309 static int vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf, 310 u_int *buflen); 311 static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir, 312 char *buf, char **retbuf, u_int buflen); 313 314 static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries"); 315 316 #ifdef DIAGNOSTIC 317 /* 318 * Grab an atomic snapshot of the name cache hash chain lengths 319 */ 320 static SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, 321 "hash table stats"); 322 323 static int 324 sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS) 325 { 326 struct nchashhead *ncpp; 327 struct namecache *ncp; 328 int i, error, n_nchash, *cntbuf; 329 330 retry: 331 n_nchash = nchash + 1; /* nchash is max index, not count */ 332 if (req->oldptr == NULL) 333 return SYSCTL_OUT(req, 0, n_nchash * sizeof(int)); 334 cntbuf = malloc(n_nchash * sizeof(int), M_TEMP, M_ZERO | M_WAITOK); 335 CACHE_RLOCK(); 336 if (n_nchash != nchash + 1) { 337 CACHE_RUNLOCK(); 338 free(cntbuf, M_TEMP); 339 goto retry; 340 } 341 /* Scan hash tables counting entries */ 342 for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++) 343 LIST_FOREACH(ncp, ncpp, nc_hash) 344 cntbuf[i]++; 345 CACHE_RUNLOCK(); 346 for (error = 0, i = 0; i < n_nchash; i++) 347 if ((error = SYSCTL_OUT(req, &cntbuf[i], sizeof(int))) != 0) 348 break; 349 free(cntbuf, M_TEMP); 350 return (error); 351 } 352 SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD| 353 CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int", 354 "nchash chain lengths"); 355 356 static int 357 sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS) 358 { 359 int error; 360 struct nchashhead *ncpp; 361 struct namecache *ncp; 362 int n_nchash; 363 int count, maxlength, used, pct; 364 365 if (!req->oldptr) 366 return SYSCTL_OUT(req, 0, 4 * sizeof(int)); 367 368 CACHE_RLOCK(); 369 n_nchash = nchash + 1; /* nchash is max index, not count */ 370 used = 0; 371 maxlength = 0; 372 373 /* Scan hash tables for applicable entries */ 374 for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) { 375 count = 0; 376 LIST_FOREACH(ncp, ncpp, nc_hash) { 377 count++; 378 } 379 if (count) 380 used++; 381 if (maxlength < count) 382 maxlength = count; 383 } 384 n_nchash = nchash + 1; 385 CACHE_RUNLOCK(); 386 pct = (used * 100) / (n_nchash / 100); 387 error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash)); 388 if (error) 389 return (error); 390 error = SYSCTL_OUT(req, &used, sizeof(used)); 391 if (error) 392 return (error); 393 error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength)); 394 if (error) 395 return (error); 396 error = SYSCTL_OUT(req, &pct, sizeof(pct)); 397 if (error) 398 return (error); 399 return (0); 400 } 401 SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD| 402 CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I", 403 "nchash statistics (number of total/used buckets, maximum chain length, usage percentage)"); 404 #endif 405 406 /* 407 * cache_zap(): 408 * 409 * Removes a namecache entry from cache, whether it contains an actual 410 * pointer to a vnode or if it is just a negative cache entry. 411 */ 412 static void 413 cache_zap(ncp) 414 struct namecache *ncp; 415 { 416 struct vnode *vp; 417 418 rw_assert(&cache_lock, RA_WLOCKED); 419 CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp); 420 #ifdef KDTRACE_HOOKS 421 if (ncp->nc_vp != NULL) { 422 SDT_PROBE(vfs, namecache, zap, done, ncp->nc_dvp, 423 nc_get_name(ncp), ncp->nc_vp, 0, 0); 424 } else { 425 SDT_PROBE(vfs, namecache, zap_negative, done, ncp->nc_dvp, 426 nc_get_name(ncp), 0, 0, 0); 427 } 428 #endif 429 vp = NULL; 430 LIST_REMOVE(ncp, nc_hash); 431 if (ncp->nc_flag & NCF_ISDOTDOT) { 432 if (ncp == ncp->nc_dvp->v_cache_dd) 433 ncp->nc_dvp->v_cache_dd = NULL; 434 } else { 435 LIST_REMOVE(ncp, nc_src); 436 if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) { 437 vp = ncp->nc_dvp; 438 numcachehv--; 439 } 440 } 441 if (ncp->nc_vp) { 442 TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst); 443 if (ncp == ncp->nc_vp->v_cache_dd) 444 ncp->nc_vp->v_cache_dd = NULL; 445 } else { 446 TAILQ_REMOVE(&ncneg, ncp, nc_dst); 447 numneg--; 448 } 449 numcache--; 450 cache_free(ncp); 451 if (vp) 452 vdrop(vp); 453 } 454 455 /* 456 * Lookup an entry in the cache 457 * 458 * Lookup is called with dvp pointing to the directory to search, 459 * cnp pointing to the name of the entry being sought. If the lookup 460 * succeeds, the vnode is returned in *vpp, and a status of -1 is 461 * returned. If the lookup determines that the name does not exist 462 * (negative cacheing), a status of ENOENT is returned. If the lookup 463 * fails, a status of zero is returned. If the directory vnode is 464 * recycled out from under us due to a forced unmount, a status of 465 * ENOENT is returned. 466 * 467 * vpp is locked and ref'd on return. If we're looking up DOTDOT, dvp is 468 * unlocked. If we're looking up . an extra ref is taken, but the lock is 469 * not recursively acquired. 470 */ 471 472 int 473 cache_lookup(dvp, vpp, cnp, tsp, ticksp) 474 struct vnode *dvp; 475 struct vnode **vpp; 476 struct componentname *cnp; 477 struct timespec *tsp; 478 int *ticksp; 479 { 480 struct namecache *ncp; 481 uint32_t hash; 482 int error, ltype, wlocked; 483 484 if (!doingcache) { 485 cnp->cn_flags &= ~MAKEENTRY; 486 return (0); 487 } 488 retry: 489 CACHE_RLOCK(); 490 wlocked = 0; 491 numcalls++; 492 error = 0; 493 494 retry_wlocked: 495 if (cnp->cn_nameptr[0] == '.') { 496 if (cnp->cn_namelen == 1) { 497 *vpp = dvp; 498 CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .", 499 dvp, cnp->cn_nameptr); 500 dothits++; 501 SDT_PROBE(vfs, namecache, lookup, hit, dvp, ".", 502 *vpp, 0, 0); 503 if (tsp != NULL) 504 timespecclear(tsp); 505 if (ticksp != NULL) 506 *ticksp = ticks; 507 goto success; 508 } 509 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') { 510 dotdothits++; 511 if (dvp->v_cache_dd == NULL) { 512 SDT_PROBE(vfs, namecache, lookup, miss, dvp, 513 "..", NULL, 0, 0); 514 goto unlock; 515 } 516 if ((cnp->cn_flags & MAKEENTRY) == 0) { 517 if (!wlocked && !CACHE_UPGRADE_LOCK()) 518 goto wlock; 519 if (dvp->v_cache_dd->nc_flag & NCF_ISDOTDOT) 520 cache_zap(dvp->v_cache_dd); 521 dvp->v_cache_dd = NULL; 522 CACHE_WUNLOCK(); 523 return (0); 524 } 525 ncp = dvp->v_cache_dd; 526 if (ncp->nc_flag & NCF_ISDOTDOT) 527 *vpp = ncp->nc_vp; 528 else 529 *vpp = ncp->nc_dvp; 530 /* Return failure if negative entry was found. */ 531 if (*vpp == NULL) 532 goto negative_success; 533 CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..", 534 dvp, cnp->cn_nameptr, *vpp); 535 SDT_PROBE(vfs, namecache, lookup, hit, dvp, "..", 536 *vpp, 0, 0); 537 cache_out_ts(ncp, tsp, ticksp); 538 if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) == 539 NCF_DTS && tsp != NULL) 540 *tsp = ((struct namecache_ts *)ncp)-> 541 nc_dotdottime; 542 goto success; 543 } 544 } 545 546 hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT); 547 hash = fnv_32_buf(&dvp, sizeof(dvp), hash); 548 LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { 549 numchecks++; 550 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen && 551 !bcmp(nc_get_name(ncp), cnp->cn_nameptr, ncp->nc_nlen)) 552 break; 553 } 554 555 /* We failed to find an entry */ 556 if (ncp == NULL) { 557 SDT_PROBE(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr, 558 NULL, 0, 0); 559 if ((cnp->cn_flags & MAKEENTRY) == 0) { 560 nummisszap++; 561 } else { 562 nummiss++; 563 } 564 nchstats.ncs_miss++; 565 goto unlock; 566 } 567 568 /* We don't want to have an entry, so dump it */ 569 if ((cnp->cn_flags & MAKEENTRY) == 0) { 570 numposzaps++; 571 nchstats.ncs_badhits++; 572 if (!wlocked && !CACHE_UPGRADE_LOCK()) 573 goto wlock; 574 cache_zap(ncp); 575 CACHE_WUNLOCK(); 576 return (0); 577 } 578 579 /* We found a "positive" match, return the vnode */ 580 if (ncp->nc_vp) { 581 numposhits++; 582 nchstats.ncs_goodhits++; 583 *vpp = ncp->nc_vp; 584 CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p", 585 dvp, cnp->cn_nameptr, *vpp, ncp); 586 SDT_PROBE(vfs, namecache, lookup, hit, dvp, nc_get_name(ncp), 587 *vpp, 0, 0); 588 cache_out_ts(ncp, tsp, ticksp); 589 goto success; 590 } 591 592 negative_success: 593 /* We found a negative match, and want to create it, so purge */ 594 if (cnp->cn_nameiop == CREATE) { 595 numnegzaps++; 596 nchstats.ncs_badhits++; 597 if (!wlocked && !CACHE_UPGRADE_LOCK()) 598 goto wlock; 599 cache_zap(ncp); 600 CACHE_WUNLOCK(); 601 return (0); 602 } 603 604 if (!wlocked && !CACHE_UPGRADE_LOCK()) 605 goto wlock; 606 numneghits++; 607 /* 608 * We found a "negative" match, so we shift it to the end of 609 * the "negative" cache entries queue to satisfy LRU. Also, 610 * check to see if the entry is a whiteout; indicate this to 611 * the componentname, if so. 612 */ 613 TAILQ_REMOVE(&ncneg, ncp, nc_dst); 614 TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst); 615 nchstats.ncs_neghits++; 616 if (ncp->nc_flag & NCF_WHITE) 617 cnp->cn_flags |= ISWHITEOUT; 618 SDT_PROBE(vfs, namecache, lookup, hit__negative, dvp, nc_get_name(ncp), 619 0, 0, 0); 620 cache_out_ts(ncp, tsp, ticksp); 621 CACHE_WUNLOCK(); 622 return (ENOENT); 623 624 wlock: 625 /* 626 * We need to update the cache after our lookup, so upgrade to 627 * a write lock and retry the operation. 628 */ 629 CACHE_RUNLOCK(); 630 CACHE_WLOCK(); 631 numupgrades++; 632 wlocked = 1; 633 goto retry_wlocked; 634 635 success: 636 /* 637 * On success we return a locked and ref'd vnode as per the lookup 638 * protocol. 639 */ 640 if (dvp == *vpp) { /* lookup on "." */ 641 VREF(*vpp); 642 if (wlocked) 643 CACHE_WUNLOCK(); 644 else 645 CACHE_RUNLOCK(); 646 /* 647 * When we lookup "." we still can be asked to lock it 648 * differently... 649 */ 650 ltype = cnp->cn_lkflags & LK_TYPE_MASK; 651 if (ltype != VOP_ISLOCKED(*vpp)) { 652 if (ltype == LK_EXCLUSIVE) { 653 vn_lock(*vpp, LK_UPGRADE | LK_RETRY); 654 if ((*vpp)->v_iflag & VI_DOOMED) { 655 /* forced unmount */ 656 vrele(*vpp); 657 *vpp = NULL; 658 return (ENOENT); 659 } 660 } else 661 vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY); 662 } 663 return (-1); 664 } 665 ltype = 0; /* silence gcc warning */ 666 if (cnp->cn_flags & ISDOTDOT) { 667 ltype = VOP_ISLOCKED(dvp); 668 VOP_UNLOCK(dvp, 0); 669 } 670 vhold(*vpp); 671 if (wlocked) 672 CACHE_WUNLOCK(); 673 else 674 CACHE_RUNLOCK(); 675 error = vget(*vpp, cnp->cn_lkflags | LK_VNHELD, cnp->cn_thread); 676 if (cnp->cn_flags & ISDOTDOT) { 677 vn_lock(dvp, ltype | LK_RETRY); 678 if (dvp->v_iflag & VI_DOOMED) { 679 if (error == 0) 680 vput(*vpp); 681 *vpp = NULL; 682 return (ENOENT); 683 } 684 } 685 if (error) { 686 *vpp = NULL; 687 goto retry; 688 } 689 if ((cnp->cn_flags & ISLASTCN) && 690 (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) { 691 ASSERT_VOP_ELOCKED(*vpp, "cache_lookup"); 692 } 693 return (-1); 694 695 unlock: 696 if (wlocked) 697 CACHE_WUNLOCK(); 698 else 699 CACHE_RUNLOCK(); 700 return (0); 701 } 702 703 /* 704 * Add an entry to the cache. 705 */ 706 void 707 cache_enter_time(dvp, vp, cnp, tsp, dtsp) 708 struct vnode *dvp; 709 struct vnode *vp; 710 struct componentname *cnp; 711 struct timespec *tsp; 712 struct timespec *dtsp; 713 { 714 struct namecache *ncp, *n2; 715 struct namecache_ts *n3; 716 struct nchashhead *ncpp; 717 uint32_t hash; 718 int flag; 719 int hold; 720 int zap; 721 int len; 722 723 CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr); 724 VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp, 725 ("cache_enter: Adding a doomed vnode")); 726 VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp, 727 ("cache_enter: Doomed vnode used as src")); 728 729 if (!doingcache) 730 return; 731 732 /* 733 * Avoid blowout in namecache entries. 734 */ 735 if (numcache >= desiredvnodes * ncsizefactor) 736 return; 737 738 flag = 0; 739 if (cnp->cn_nameptr[0] == '.') { 740 if (cnp->cn_namelen == 1) 741 return; 742 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') { 743 CACHE_WLOCK(); 744 /* 745 * If dotdot entry already exists, just retarget it 746 * to new parent vnode, otherwise continue with new 747 * namecache entry allocation. 748 */ 749 if ((ncp = dvp->v_cache_dd) != NULL && 750 ncp->nc_flag & NCF_ISDOTDOT) { 751 KASSERT(ncp->nc_dvp == dvp, 752 ("wrong isdotdot parent")); 753 if (ncp->nc_vp != NULL) { 754 TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, 755 ncp, nc_dst); 756 } else { 757 TAILQ_REMOVE(&ncneg, ncp, nc_dst); 758 numneg--; 759 } 760 if (vp != NULL) { 761 TAILQ_INSERT_HEAD(&vp->v_cache_dst, 762 ncp, nc_dst); 763 } else { 764 TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst); 765 numneg++; 766 } 767 ncp->nc_vp = vp; 768 CACHE_WUNLOCK(); 769 return; 770 } 771 dvp->v_cache_dd = NULL; 772 SDT_PROBE(vfs, namecache, enter, done, dvp, "..", vp, 773 0, 0); 774 CACHE_WUNLOCK(); 775 flag = NCF_ISDOTDOT; 776 } 777 } 778 779 hold = 0; 780 zap = 0; 781 782 /* 783 * Calculate the hash key and setup as much of the new 784 * namecache entry as possible before acquiring the lock. 785 */ 786 ncp = cache_alloc(cnp->cn_namelen, tsp != NULL); 787 ncp->nc_vp = vp; 788 ncp->nc_dvp = dvp; 789 ncp->nc_flag = flag; 790 if (tsp != NULL) { 791 n3 = (struct namecache_ts *)ncp; 792 n3->nc_time = *tsp; 793 n3->nc_ticks = ticks; 794 n3->nc_flag |= NCF_TS; 795 if (dtsp != NULL) { 796 n3->nc_dotdottime = *dtsp; 797 n3->nc_flag |= NCF_DTS; 798 } 799 } 800 len = ncp->nc_nlen = cnp->cn_namelen; 801 hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT); 802 strlcpy(nc_get_name(ncp), cnp->cn_nameptr, len + 1); 803 hash = fnv_32_buf(&dvp, sizeof(dvp), hash); 804 CACHE_WLOCK(); 805 806 /* 807 * See if this vnode or negative entry is already in the cache 808 * with this name. This can happen with concurrent lookups of 809 * the same path name. 810 */ 811 ncpp = NCHHASH(hash); 812 LIST_FOREACH(n2, ncpp, nc_hash) { 813 if (n2->nc_dvp == dvp && 814 n2->nc_nlen == cnp->cn_namelen && 815 !bcmp(nc_get_name(n2), cnp->cn_nameptr, n2->nc_nlen)) { 816 if (tsp != NULL) { 817 KASSERT((n2->nc_flag & NCF_TS) != 0, 818 ("no NCF_TS")); 819 n3 = (struct namecache_ts *)n2; 820 n3->nc_time = 821 ((struct namecache_ts *)ncp)->nc_time; 822 n3->nc_ticks = 823 ((struct namecache_ts *)ncp)->nc_ticks; 824 if (dtsp != NULL) { 825 n3->nc_dotdottime = 826 ((struct namecache_ts *)ncp)-> 827 nc_dotdottime; 828 n3->nc_flag |= NCF_DTS; 829 } 830 } 831 CACHE_WUNLOCK(); 832 cache_free(ncp); 833 return; 834 } 835 } 836 837 if (flag == NCF_ISDOTDOT) { 838 /* 839 * See if we are trying to add .. entry, but some other lookup 840 * has populated v_cache_dd pointer already. 841 */ 842 if (dvp->v_cache_dd != NULL) { 843 CACHE_WUNLOCK(); 844 cache_free(ncp); 845 return; 846 } 847 KASSERT(vp == NULL || vp->v_type == VDIR, 848 ("wrong vnode type %p", vp)); 849 dvp->v_cache_dd = ncp; 850 } 851 852 numcache++; 853 if (!vp) { 854 numneg++; 855 if (cnp->cn_flags & ISWHITEOUT) 856 ncp->nc_flag |= NCF_WHITE; 857 } else if (vp->v_type == VDIR) { 858 if (flag != NCF_ISDOTDOT) { 859 /* 860 * For this case, the cache entry maps both the 861 * directory name in it and the name ".." for the 862 * directory's parent. 863 */ 864 if ((n2 = vp->v_cache_dd) != NULL && 865 (n2->nc_flag & NCF_ISDOTDOT) != 0) 866 cache_zap(n2); 867 vp->v_cache_dd = ncp; 868 } 869 } else { 870 vp->v_cache_dd = NULL; 871 } 872 873 /* 874 * Insert the new namecache entry into the appropriate chain 875 * within the cache entries table. 876 */ 877 LIST_INSERT_HEAD(ncpp, ncp, nc_hash); 878 if (flag != NCF_ISDOTDOT) { 879 if (LIST_EMPTY(&dvp->v_cache_src)) { 880 hold = 1; 881 numcachehv++; 882 } 883 LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src); 884 } 885 886 /* 887 * If the entry is "negative", we place it into the 888 * "negative" cache queue, otherwise, we place it into the 889 * destination vnode's cache entries queue. 890 */ 891 if (vp) { 892 TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst); 893 SDT_PROBE(vfs, namecache, enter, done, dvp, nc_get_name(ncp), 894 vp, 0, 0); 895 } else { 896 TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst); 897 SDT_PROBE(vfs, namecache, enter_negative, done, dvp, 898 nc_get_name(ncp), 0, 0, 0); 899 } 900 if (numneg * ncnegfactor > numcache) { 901 ncp = TAILQ_FIRST(&ncneg); 902 KASSERT(ncp->nc_vp == NULL, ("ncp %p vp %p on ncneg", 903 ncp, ncp->nc_vp)); 904 zap = 1; 905 } 906 if (hold) 907 vhold(dvp); 908 if (zap) 909 cache_zap(ncp); 910 CACHE_WUNLOCK(); 911 } 912 913 /* 914 * Name cache initialization, from vfs_init() when we are booting 915 */ 916 static void 917 nchinit(void *dummy __unused) 918 { 919 920 TAILQ_INIT(&ncneg); 921 922 cache_zone_small = uma_zcreate("S VFS Cache", 923 sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1, 924 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 925 cache_zone_small_ts = uma_zcreate("STS VFS Cache", 926 sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1, 927 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 928 cache_zone_large = uma_zcreate("L VFS Cache", 929 sizeof(struct namecache) + NAME_MAX + 1, 930 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 931 cache_zone_large_ts = uma_zcreate("LTS VFS Cache", 932 sizeof(struct namecache_ts) + NAME_MAX + 1, 933 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 934 935 nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash); 936 } 937 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL); 938 939 void 940 cache_changesize(int newmaxvnodes) 941 { 942 struct nchashhead *new_nchashtbl, *old_nchashtbl; 943 u_long new_nchash, old_nchash; 944 struct namecache *ncp; 945 uint32_t hash; 946 int i; 947 948 new_nchashtbl = hashinit(newmaxvnodes * 2, M_VFSCACHE, &new_nchash); 949 /* If same hash table size, nothing to do */ 950 if (nchash == new_nchash) { 951 free(new_nchashtbl, M_VFSCACHE); 952 return; 953 } 954 /* 955 * Move everything from the old hash table to the new table. 956 * None of the namecache entries in the table can be removed 957 * because to do so, they have to be removed from the hash table. 958 */ 959 CACHE_WLOCK(); 960 old_nchashtbl = nchashtbl; 961 old_nchash = nchash; 962 nchashtbl = new_nchashtbl; 963 nchash = new_nchash; 964 for (i = 0; i <= old_nchash; i++) { 965 while ((ncp = LIST_FIRST(&old_nchashtbl[i])) != NULL) { 966 hash = fnv_32_buf(nc_get_name(ncp), ncp->nc_nlen, 967 FNV1_32_INIT); 968 hash = fnv_32_buf(&ncp->nc_dvp, sizeof(ncp->nc_dvp), 969 hash); 970 LIST_REMOVE(ncp, nc_hash); 971 LIST_INSERT_HEAD(NCHHASH(hash), ncp, nc_hash); 972 } 973 } 974 CACHE_WUNLOCK(); 975 free(old_nchashtbl, M_VFSCACHE); 976 } 977 978 /* 979 * Invalidate all entries to a particular vnode. 980 */ 981 void 982 cache_purge(vp) 983 struct vnode *vp; 984 { 985 986 CTR1(KTR_VFS, "cache_purge(%p)", vp); 987 SDT_PROBE(vfs, namecache, purge, done, vp, 0, 0, 0, 0); 988 CACHE_WLOCK(); 989 while (!LIST_EMPTY(&vp->v_cache_src)) 990 cache_zap(LIST_FIRST(&vp->v_cache_src)); 991 while (!TAILQ_EMPTY(&vp->v_cache_dst)) 992 cache_zap(TAILQ_FIRST(&vp->v_cache_dst)); 993 if (vp->v_cache_dd != NULL) { 994 KASSERT(vp->v_cache_dd->nc_flag & NCF_ISDOTDOT, 995 ("lost dotdot link")); 996 cache_zap(vp->v_cache_dd); 997 } 998 KASSERT(vp->v_cache_dd == NULL, ("incomplete purge")); 999 CACHE_WUNLOCK(); 1000 } 1001 1002 /* 1003 * Invalidate all negative entries for a particular directory vnode. 1004 */ 1005 void 1006 cache_purge_negative(vp) 1007 struct vnode *vp; 1008 { 1009 struct namecache *cp, *ncp; 1010 1011 CTR1(KTR_VFS, "cache_purge_negative(%p)", vp); 1012 SDT_PROBE(vfs, namecache, purge_negative, done, vp, 0, 0, 0, 0); 1013 CACHE_WLOCK(); 1014 LIST_FOREACH_SAFE(cp, &vp->v_cache_src, nc_src, ncp) { 1015 if (cp->nc_vp == NULL) 1016 cache_zap(cp); 1017 } 1018 CACHE_WUNLOCK(); 1019 } 1020 1021 /* 1022 * Flush all entries referencing a particular filesystem. 1023 */ 1024 void 1025 cache_purgevfs(mp) 1026 struct mount *mp; 1027 { 1028 struct nchashhead *ncpp; 1029 struct namecache *ncp, *nnp; 1030 1031 /* Scan hash tables for applicable entries */ 1032 SDT_PROBE(vfs, namecache, purgevfs, done, mp, 0, 0, 0, 0); 1033 CACHE_WLOCK(); 1034 for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) { 1035 LIST_FOREACH_SAFE(ncp, ncpp, nc_hash, nnp) { 1036 if (ncp->nc_dvp->v_mount == mp) 1037 cache_zap(ncp); 1038 } 1039 } 1040 CACHE_WUNLOCK(); 1041 } 1042 1043 /* 1044 * Perform canonical checks and cache lookup and pass on to filesystem 1045 * through the vop_cachedlookup only if needed. 1046 */ 1047 1048 int 1049 vfs_cache_lookup(ap) 1050 struct vop_lookup_args /* { 1051 struct vnode *a_dvp; 1052 struct vnode **a_vpp; 1053 struct componentname *a_cnp; 1054 } */ *ap; 1055 { 1056 struct vnode *dvp; 1057 int error; 1058 struct vnode **vpp = ap->a_vpp; 1059 struct componentname *cnp = ap->a_cnp; 1060 struct ucred *cred = cnp->cn_cred; 1061 int flags = cnp->cn_flags; 1062 struct thread *td = cnp->cn_thread; 1063 1064 *vpp = NULL; 1065 dvp = ap->a_dvp; 1066 1067 if (dvp->v_type != VDIR) 1068 return (ENOTDIR); 1069 1070 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && 1071 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) 1072 return (EROFS); 1073 1074 error = VOP_ACCESS(dvp, VEXEC, cred, td); 1075 if (error) 1076 return (error); 1077 1078 error = cache_lookup(dvp, vpp, cnp, NULL, NULL); 1079 if (error == 0) 1080 return (VOP_CACHEDLOOKUP(dvp, vpp, cnp)); 1081 if (error == -1) 1082 return (0); 1083 return (error); 1084 } 1085 1086 /* 1087 * XXX All of these sysctls would probably be more productive dead. 1088 */ 1089 static int disablecwd; 1090 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, 1091 "Disable the getcwd syscall"); 1092 1093 /* Implementation of the getcwd syscall. */ 1094 int 1095 sys___getcwd(td, uap) 1096 struct thread *td; 1097 struct __getcwd_args *uap; 1098 { 1099 1100 return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen, 1101 MAXPATHLEN)); 1102 } 1103 1104 int 1105 kern___getcwd(struct thread *td, char *buf, enum uio_seg bufseg, u_int buflen, 1106 u_int path_max) 1107 { 1108 char *bp, *tmpbuf; 1109 struct filedesc *fdp; 1110 struct vnode *cdir, *rdir; 1111 int error; 1112 1113 if (disablecwd) 1114 return (ENODEV); 1115 if (buflen < 2) 1116 return (EINVAL); 1117 if (buflen > path_max) 1118 buflen = path_max; 1119 1120 tmpbuf = malloc(buflen, M_TEMP, M_WAITOK); 1121 fdp = td->td_proc->p_fd; 1122 FILEDESC_SLOCK(fdp); 1123 cdir = fdp->fd_cdir; 1124 VREF(cdir); 1125 rdir = fdp->fd_rdir; 1126 VREF(rdir); 1127 FILEDESC_SUNLOCK(fdp); 1128 error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen); 1129 vrele(rdir); 1130 vrele(cdir); 1131 1132 if (!error) { 1133 if (bufseg == UIO_SYSSPACE) 1134 bcopy(bp, buf, strlen(bp) + 1); 1135 else 1136 error = copyout(bp, buf, strlen(bp) + 1); 1137 #ifdef KTRACE 1138 if (KTRPOINT(curthread, KTR_NAMEI)) 1139 ktrnamei(bp); 1140 #endif 1141 } 1142 free(tmpbuf, M_TEMP); 1143 return (error); 1144 } 1145 1146 /* 1147 * Thus begins the fullpath magic. 1148 */ 1149 1150 #undef STATNODE 1151 #define STATNODE(name, descr) \ 1152 static u_int name; \ 1153 SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr) 1154 1155 static int disablefullpath; 1156 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0, 1157 "Disable the vn_fullpath function"); 1158 1159 /* These count for kern___getcwd(), too. */ 1160 STATNODE(numfullpathcalls, "Number of fullpath search calls"); 1161 STATNODE(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)"); 1162 STATNODE(numfullpathfail2, 1163 "Number of fullpath search errors (VOP_VPTOCNP failures)"); 1164 STATNODE(numfullpathfail4, "Number of fullpath search errors (ENOMEM)"); 1165 STATNODE(numfullpathfound, "Number of successful fullpath calls"); 1166 1167 /* 1168 * Retrieve the full filesystem path that correspond to a vnode from the name 1169 * cache (if available) 1170 */ 1171 int 1172 vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf) 1173 { 1174 char *buf; 1175 struct filedesc *fdp; 1176 struct vnode *rdir; 1177 int error; 1178 1179 if (disablefullpath) 1180 return (ENODEV); 1181 if (vn == NULL) 1182 return (EINVAL); 1183 1184 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); 1185 fdp = td->td_proc->p_fd; 1186 FILEDESC_SLOCK(fdp); 1187 rdir = fdp->fd_rdir; 1188 VREF(rdir); 1189 FILEDESC_SUNLOCK(fdp); 1190 error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN); 1191 vrele(rdir); 1192 1193 if (!error) 1194 *freebuf = buf; 1195 else 1196 free(buf, M_TEMP); 1197 return (error); 1198 } 1199 1200 /* 1201 * This function is similar to vn_fullpath, but it attempts to lookup the 1202 * pathname relative to the global root mount point. This is required for the 1203 * auditing sub-system, as audited pathnames must be absolute, relative to the 1204 * global root mount point. 1205 */ 1206 int 1207 vn_fullpath_global(struct thread *td, struct vnode *vn, 1208 char **retbuf, char **freebuf) 1209 { 1210 char *buf; 1211 int error; 1212 1213 if (disablefullpath) 1214 return (ENODEV); 1215 if (vn == NULL) 1216 return (EINVAL); 1217 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); 1218 error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN); 1219 if (!error) 1220 *freebuf = buf; 1221 else 1222 free(buf, M_TEMP); 1223 return (error); 1224 } 1225 1226 int 1227 vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen) 1228 { 1229 int error; 1230 1231 CACHE_RLOCK(); 1232 error = vn_vptocnp_locked(vp, cred, buf, buflen); 1233 if (error == 0) 1234 CACHE_RUNLOCK(); 1235 return (error); 1236 } 1237 1238 static int 1239 vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf, 1240 u_int *buflen) 1241 { 1242 struct vnode *dvp; 1243 struct namecache *ncp; 1244 int error; 1245 1246 TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) { 1247 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 1248 break; 1249 } 1250 if (ncp != NULL) { 1251 if (*buflen < ncp->nc_nlen) { 1252 CACHE_RUNLOCK(); 1253 vrele(*vp); 1254 numfullpathfail4++; 1255 error = ENOMEM; 1256 SDT_PROBE(vfs, namecache, fullpath, return, error, 1257 vp, NULL, 0, 0); 1258 return (error); 1259 } 1260 *buflen -= ncp->nc_nlen; 1261 memcpy(buf + *buflen, nc_get_name(ncp), ncp->nc_nlen); 1262 SDT_PROBE(vfs, namecache, fullpath, hit, ncp->nc_dvp, 1263 nc_get_name(ncp), vp, 0, 0); 1264 dvp = *vp; 1265 *vp = ncp->nc_dvp; 1266 vref(*vp); 1267 CACHE_RUNLOCK(); 1268 vrele(dvp); 1269 CACHE_RLOCK(); 1270 return (0); 1271 } 1272 SDT_PROBE(vfs, namecache, fullpath, miss, vp, 0, 0, 0, 0); 1273 1274 CACHE_RUNLOCK(); 1275 vn_lock(*vp, LK_SHARED | LK_RETRY); 1276 error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen); 1277 vput(*vp); 1278 if (error) { 1279 numfullpathfail2++; 1280 SDT_PROBE(vfs, namecache, fullpath, return, error, vp, 1281 NULL, 0, 0); 1282 return (error); 1283 } 1284 1285 *vp = dvp; 1286 CACHE_RLOCK(); 1287 if (dvp->v_iflag & VI_DOOMED) { 1288 /* forced unmount */ 1289 CACHE_RUNLOCK(); 1290 vrele(dvp); 1291 error = ENOENT; 1292 SDT_PROBE(vfs, namecache, fullpath, return, error, vp, 1293 NULL, 0, 0); 1294 return (error); 1295 } 1296 /* 1297 * *vp has its use count incremented still. 1298 */ 1299 1300 return (0); 1301 } 1302 1303 /* 1304 * The magic behind kern___getcwd() and vn_fullpath(). 1305 */ 1306 static int 1307 vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir, 1308 char *buf, char **retbuf, u_int buflen) 1309 { 1310 int error, slash_prefixed; 1311 #ifdef KDTRACE_HOOKS 1312 struct vnode *startvp = vp; 1313 #endif 1314 struct vnode *vp1; 1315 1316 buflen--; 1317 buf[buflen] = '\0'; 1318 error = 0; 1319 slash_prefixed = 0; 1320 1321 SDT_PROBE(vfs, namecache, fullpath, entry, vp, 0, 0, 0, 0); 1322 numfullpathcalls++; 1323 vref(vp); 1324 CACHE_RLOCK(); 1325 if (vp->v_type != VDIR) { 1326 error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen); 1327 if (error) 1328 return (error); 1329 if (buflen == 0) { 1330 CACHE_RUNLOCK(); 1331 vrele(vp); 1332 return (ENOMEM); 1333 } 1334 buf[--buflen] = '/'; 1335 slash_prefixed = 1; 1336 } 1337 while (vp != rdir && vp != rootvnode) { 1338 if (vp->v_vflag & VV_ROOT) { 1339 if (vp->v_iflag & VI_DOOMED) { /* forced unmount */ 1340 CACHE_RUNLOCK(); 1341 vrele(vp); 1342 error = ENOENT; 1343 SDT_PROBE(vfs, namecache, fullpath, return, 1344 error, vp, NULL, 0, 0); 1345 break; 1346 } 1347 vp1 = vp->v_mount->mnt_vnodecovered; 1348 vref(vp1); 1349 CACHE_RUNLOCK(); 1350 vrele(vp); 1351 vp = vp1; 1352 CACHE_RLOCK(); 1353 continue; 1354 } 1355 if (vp->v_type != VDIR) { 1356 CACHE_RUNLOCK(); 1357 vrele(vp); 1358 numfullpathfail1++; 1359 error = ENOTDIR; 1360 SDT_PROBE(vfs, namecache, fullpath, return, 1361 error, vp, NULL, 0, 0); 1362 break; 1363 } 1364 error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen); 1365 if (error) 1366 break; 1367 if (buflen == 0) { 1368 CACHE_RUNLOCK(); 1369 vrele(vp); 1370 error = ENOMEM; 1371 SDT_PROBE(vfs, namecache, fullpath, return, error, 1372 startvp, NULL, 0, 0); 1373 break; 1374 } 1375 buf[--buflen] = '/'; 1376 slash_prefixed = 1; 1377 } 1378 if (error) 1379 return (error); 1380 if (!slash_prefixed) { 1381 if (buflen == 0) { 1382 CACHE_RUNLOCK(); 1383 vrele(vp); 1384 numfullpathfail4++; 1385 SDT_PROBE(vfs, namecache, fullpath, return, ENOMEM, 1386 startvp, NULL, 0, 0); 1387 return (ENOMEM); 1388 } 1389 buf[--buflen] = '/'; 1390 } 1391 numfullpathfound++; 1392 CACHE_RUNLOCK(); 1393 vrele(vp); 1394 1395 SDT_PROBE(vfs, namecache, fullpath, return, 0, startvp, buf + buflen, 1396 0, 0); 1397 *retbuf = buf + buflen; 1398 return (0); 1399 } 1400 1401 struct vnode * 1402 vn_dir_dd_ino(struct vnode *vp) 1403 { 1404 struct namecache *ncp; 1405 struct vnode *ddvp; 1406 1407 ASSERT_VOP_LOCKED(vp, "vn_dir_dd_ino"); 1408 CACHE_RLOCK(); 1409 TAILQ_FOREACH(ncp, &(vp->v_cache_dst), nc_dst) { 1410 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) 1411 continue; 1412 ddvp = ncp->nc_dvp; 1413 vhold(ddvp); 1414 CACHE_RUNLOCK(); 1415 if (vget(ddvp, LK_SHARED | LK_NOWAIT | LK_VNHELD, curthread)) 1416 return (NULL); 1417 return (ddvp); 1418 } 1419 CACHE_RUNLOCK(); 1420 return (NULL); 1421 } 1422 1423 int 1424 vn_commname(struct vnode *vp, char *buf, u_int buflen) 1425 { 1426 struct namecache *ncp; 1427 int l; 1428 1429 CACHE_RLOCK(); 1430 TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst) 1431 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 1432 break; 1433 if (ncp == NULL) { 1434 CACHE_RUNLOCK(); 1435 return (ENOENT); 1436 } 1437 l = min(ncp->nc_nlen, buflen - 1); 1438 memcpy(buf, nc_get_name(ncp), l); 1439 CACHE_RUNLOCK(); 1440 buf[l] = '\0'; 1441 return (0); 1442 } 1443 1444 /* ABI compat shims for old kernel modules. */ 1445 #undef cache_enter 1446 1447 void cache_enter(struct vnode *dvp, struct vnode *vp, 1448 struct componentname *cnp); 1449 1450 void 1451 cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) 1452 { 1453 1454 cache_enter_time(dvp, vp, cnp, NULL, NULL); 1455 } 1456 1457 /* 1458 * This function updates path string to vnode's full global path 1459 * and checks the size of the new path string against the pathlen argument. 1460 * 1461 * Requires a locked, referenced vnode. 1462 * Vnode is re-locked on success or ENODEV, otherwise unlocked. 1463 * 1464 * If sysctl debug.disablefullpath is set, ENODEV is returned, 1465 * vnode is left locked and path remain untouched. 1466 * 1467 * If vp is a directory, the call to vn_fullpath_global() always succeeds 1468 * because it falls back to the ".." lookup if the namecache lookup fails. 1469 */ 1470 int 1471 vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path, 1472 u_int pathlen) 1473 { 1474 struct nameidata nd; 1475 struct vnode *vp1; 1476 char *rpath, *fbuf; 1477 int error; 1478 1479 ASSERT_VOP_ELOCKED(vp, __func__); 1480 1481 /* Return ENODEV if sysctl debug.disablefullpath==1 */ 1482 if (disablefullpath) 1483 return (ENODEV); 1484 1485 /* Construct global filesystem path from vp. */ 1486 VOP_UNLOCK(vp, 0); 1487 error = vn_fullpath_global(td, vp, &rpath, &fbuf); 1488 1489 if (error != 0) { 1490 vrele(vp); 1491 return (error); 1492 } 1493 1494 if (strlen(rpath) >= pathlen) { 1495 vrele(vp); 1496 error = ENAMETOOLONG; 1497 goto out; 1498 } 1499 1500 /* 1501 * Re-lookup the vnode by path to detect a possible rename. 1502 * As a side effect, the vnode is relocked. 1503 * If vnode was renamed, return ENOENT. 1504 */ 1505 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1, 1506 UIO_SYSSPACE, path, td); 1507 error = namei(&nd); 1508 if (error != 0) { 1509 vrele(vp); 1510 goto out; 1511 } 1512 NDFREE(&nd, NDF_ONLY_PNBUF); 1513 vp1 = nd.ni_vp; 1514 vrele(vp); 1515 if (vp1 == vp) 1516 strcpy(path, rpath); 1517 else { 1518 vput(vp1); 1519 error = ENOENT; 1520 } 1521 1522 out: 1523 free(fbuf, M_TEMP); 1524 return (error); 1525 } 1526