1 /*- 2 * Copyright (c) 1989, 1993, 1995 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Poul-Henning Kamp of the FreeBSD Project. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 4. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95 33 */ 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include "opt_ktrace.h" 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/filedesc.h> 43 #include <sys/fnv_hash.h> 44 #include <sys/kernel.h> 45 #include <sys/lock.h> 46 #include <sys/malloc.h> 47 #include <sys/fcntl.h> 48 #include <sys/mount.h> 49 #include <sys/namei.h> 50 #include <sys/proc.h> 51 #include <sys/rwlock.h> 52 #include <sys/sdt.h> 53 #include <sys/syscallsubr.h> 54 #include <sys/sysctl.h> 55 #include <sys/sysproto.h> 56 #include <sys/vnode.h> 57 #ifdef KTRACE 58 #include <sys/ktrace.h> 59 #endif 60 61 #include <vm/uma.h> 62 63 SDT_PROVIDER_DECLARE(vfs); 64 SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *", 65 "struct vnode *"); 66 SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *", 67 "char *"); 68 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *"); 69 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *", 70 "char *", "struct vnode *"); 71 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *"); 72 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int", 73 "struct vnode *", "char *"); 74 SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *", 75 "struct vnode *"); 76 SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit__negative, 77 "struct vnode *", "char *"); 78 SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *", 79 "char *"); 80 SDT_PROBE_DEFINE1(vfs, namecache, purge, done, "struct vnode *"); 81 SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *"); 82 SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *"); 83 SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *", 84 "struct vnode *"); 85 SDT_PROBE_DEFINE2(vfs, namecache, zap_negative, done, "struct vnode *", 86 "char *"); 87 88 /* 89 * This structure describes the elements in the cache of recent 90 * names looked up by namei. 91 */ 92 93 struct namecache { 94 LIST_ENTRY(namecache) nc_hash; /* hash chain */ 95 LIST_ENTRY(namecache) nc_src; /* source vnode list */ 96 TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */ 97 struct vnode *nc_dvp; /* vnode of parent of name */ 98 struct vnode *nc_vp; /* vnode the name refers to */ 99 u_char nc_flag; /* flag bits */ 100 u_char nc_nlen; /* length of name */ 101 char nc_name[0]; /* segment name + nul */ 102 }; 103 104 /* 105 * struct namecache_ts repeats struct namecache layout up to the 106 * nc_nlen member. 107 * struct namecache_ts is used in place of struct namecache when time(s) need 108 * to be stored. The nc_dotdottime field is used when a cache entry is mapping 109 * both a non-dotdot directory name plus dotdot for the directory's 110 * parent. 111 */ 112 struct namecache_ts { 113 LIST_ENTRY(namecache) nc_hash; /* hash chain */ 114 LIST_ENTRY(namecache) nc_src; /* source vnode list */ 115 TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */ 116 struct vnode *nc_dvp; /* vnode of parent of name */ 117 struct vnode *nc_vp; /* vnode the name refers to */ 118 u_char nc_flag; /* flag bits */ 119 u_char nc_nlen; /* length of name */ 120 struct timespec nc_time; /* timespec provided by fs */ 121 struct timespec nc_dotdottime; /* dotdot timespec provided by fs */ 122 int nc_ticks; /* ticks value when entry was added */ 123 char nc_name[0]; /* segment name + nul */ 124 }; 125 126 /* 127 * Flags in namecache.nc_flag 128 */ 129 #define NCF_WHITE 0x01 130 #define NCF_ISDOTDOT 0x02 131 #define NCF_TS 0x04 132 #define NCF_DTS 0x08 133 134 /* 135 * Name caching works as follows: 136 * 137 * Names found by directory scans are retained in a cache 138 * for future reference. It is managed LRU, so frequently 139 * used names will hang around. Cache is indexed by hash value 140 * obtained from (vp, name) where vp refers to the directory 141 * containing name. 142 * 143 * If it is a "negative" entry, (i.e. for a name that is known NOT to 144 * exist) the vnode pointer will be NULL. 145 * 146 * Upon reaching the last segment of a path, if the reference 147 * is for DELETE, or NOCACHE is set (rewrite), and the 148 * name is located in the cache, it will be dropped. 149 */ 150 151 /* 152 * Structures associated with name cacheing. 153 */ 154 #define NCHHASH(hash) \ 155 (&nchashtbl[(hash) & nchash]) 156 static LIST_HEAD(nchashhead, namecache) *nchashtbl; /* Hash Table */ 157 static TAILQ_HEAD(, namecache) ncneg; /* Hash Table */ 158 static u_long nchash; /* size of hash table */ 159 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, 160 "Size of namecache hash table"); 161 static u_long ncnegfactor = 16; /* ratio of negative entries */ 162 SYSCTL_ULONG(_vfs, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, 163 "Ratio of negative namecache entries"); 164 static u_long numneg; /* number of negative entries allocated */ 165 SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, 166 "Number of negative entries in namecache"); 167 static u_long numcache; /* number of cache entries allocated */ 168 SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, 169 "Number of namecache entries"); 170 static u_long numcachehv; /* number of cache entries with vnodes held */ 171 SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0, 172 "Number of namecache entries with vnodes held"); 173 static u_int ncsizefactor = 2; 174 SYSCTL_UINT(_vfs, OID_AUTO, ncsizefactor, CTLFLAG_RW, &ncsizefactor, 0, 175 "Size factor for namecache"); 176 177 struct nchstats nchstats; /* cache effectiveness statistics */ 178 179 static struct rwlock cache_lock; 180 RW_SYSINIT(vfscache, &cache_lock, "Name Cache"); 181 182 #define CACHE_UPGRADE_LOCK() rw_try_upgrade(&cache_lock) 183 #define CACHE_RLOCK() rw_rlock(&cache_lock) 184 #define CACHE_RUNLOCK() rw_runlock(&cache_lock) 185 #define CACHE_WLOCK() rw_wlock(&cache_lock) 186 #define CACHE_WUNLOCK() rw_wunlock(&cache_lock) 187 188 /* 189 * UMA zones for the VFS cache. 190 * 191 * The small cache is used for entries with short names, which are the 192 * most common. The large cache is used for entries which are too big to 193 * fit in the small cache. 194 */ 195 static uma_zone_t cache_zone_small; 196 static uma_zone_t cache_zone_small_ts; 197 static uma_zone_t cache_zone_large; 198 static uma_zone_t cache_zone_large_ts; 199 200 #define CACHE_PATH_CUTOFF 35 201 202 static struct namecache * 203 cache_alloc(int len, int ts) 204 { 205 206 if (len > CACHE_PATH_CUTOFF) { 207 if (ts) 208 return (uma_zalloc(cache_zone_large_ts, M_WAITOK)); 209 else 210 return (uma_zalloc(cache_zone_large, M_WAITOK)); 211 } 212 if (ts) 213 return (uma_zalloc(cache_zone_small_ts, M_WAITOK)); 214 else 215 return (uma_zalloc(cache_zone_small, M_WAITOK)); 216 } 217 218 static void 219 cache_free(struct namecache *ncp) 220 { 221 int ts; 222 223 if (ncp == NULL) 224 return; 225 ts = ncp->nc_flag & NCF_TS; 226 if (ncp->nc_nlen <= CACHE_PATH_CUTOFF) { 227 if (ts) 228 uma_zfree(cache_zone_small_ts, ncp); 229 else 230 uma_zfree(cache_zone_small, ncp); 231 } else if (ts) 232 uma_zfree(cache_zone_large_ts, ncp); 233 else 234 uma_zfree(cache_zone_large, ncp); 235 } 236 237 static char * 238 nc_get_name(struct namecache *ncp) 239 { 240 struct namecache_ts *ncp_ts; 241 242 if ((ncp->nc_flag & NCF_TS) == 0) 243 return (ncp->nc_name); 244 ncp_ts = (struct namecache_ts *)ncp; 245 return (ncp_ts->nc_name); 246 } 247 248 static void 249 cache_out_ts(struct namecache *ncp, struct timespec *tsp, int *ticksp) 250 { 251 252 KASSERT((ncp->nc_flag & NCF_TS) != 0 || 253 (tsp == NULL && ticksp == NULL), 254 ("No NCF_TS")); 255 256 if (tsp != NULL) 257 *tsp = ((struct namecache_ts *)ncp)->nc_time; 258 if (ticksp != NULL) 259 *ticksp = ((struct namecache_ts *)ncp)->nc_ticks; 260 } 261 262 static int doingcache = 1; /* 1 => enable the cache */ 263 SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, 264 "VFS namecache enabled"); 265 266 /* Export size information to userland */ 267 SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, 268 sizeof(struct namecache), "sizeof(struct namecache)"); 269 270 /* 271 * The new name cache statistics 272 */ 273 static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, 274 "Name cache statistics"); 275 #define STATNODE(mode, name, var, descr) \ 276 SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, descr); 277 STATNODE(CTLFLAG_RD, numneg, &numneg, "Number of negative cache entries"); 278 STATNODE(CTLFLAG_RD, numcache, &numcache, "Number of cache entries"); 279 static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls, 280 "Number of cache lookups"); 281 static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits, 282 "Number of '.' hits"); 283 static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits, 284 "Number of '..' hits"); 285 static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks, 286 "Number of checks in lookup"); 287 static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss, 288 "Number of cache misses"); 289 static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap, 290 "Number of cache misses we do not want to cache"); 291 static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps, 292 "Number of cache hits (positive) we do not want to cache"); 293 static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits, 294 "Number of cache hits (positive)"); 295 static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps, 296 "Number of cache hits (negative) we do not want to cache"); 297 static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits, 298 "Number of cache hits (negative)"); 299 static u_long numupgrades; STATNODE(CTLFLAG_RD, numupgrades, &numupgrades, 300 "Number of updates of the cache after lookup (write lock + retry)"); 301 302 SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD | CTLFLAG_MPSAFE, 303 &nchstats, sizeof(nchstats), "LU", 304 "VFS cache effectiveness statistics"); 305 306 307 308 static void cache_zap(struct namecache *ncp); 309 static int vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf, 310 u_int *buflen); 311 static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir, 312 char *buf, char **retbuf, u_int buflen); 313 314 static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries"); 315 316 #ifdef DIAGNOSTIC 317 /* 318 * Grab an atomic snapshot of the name cache hash chain lengths 319 */ 320 static SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, 321 "hash table stats"); 322 323 static int 324 sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS) 325 { 326 struct nchashhead *ncpp; 327 struct namecache *ncp; 328 int i, error, n_nchash, *cntbuf; 329 330 n_nchash = nchash + 1; /* nchash is max index, not count */ 331 if (req->oldptr == NULL) 332 return SYSCTL_OUT(req, 0, n_nchash * sizeof(int)); 333 cntbuf = malloc(n_nchash * sizeof(int), M_TEMP, M_ZERO | M_WAITOK); 334 CACHE_RLOCK(); 335 /* Scan hash tables counting entries */ 336 for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++) 337 LIST_FOREACH(ncp, ncpp, nc_hash) 338 cntbuf[i]++; 339 CACHE_RUNLOCK(); 340 for (error = 0, i = 0; i < n_nchash; i++) 341 if ((error = SYSCTL_OUT(req, &cntbuf[i], sizeof(int))) != 0) 342 break; 343 free(cntbuf, M_TEMP); 344 return (error); 345 } 346 SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD| 347 CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int", 348 "nchash chain lengths"); 349 350 static int 351 sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS) 352 { 353 int error; 354 struct nchashhead *ncpp; 355 struct namecache *ncp; 356 int n_nchash; 357 int count, maxlength, used, pct; 358 359 if (!req->oldptr) 360 return SYSCTL_OUT(req, 0, 4 * sizeof(int)); 361 362 CACHE_RLOCK(); 363 n_nchash = nchash + 1; /* nchash is max index, not count */ 364 used = 0; 365 maxlength = 0; 366 367 /* Scan hash tables for applicable entries */ 368 for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) { 369 count = 0; 370 LIST_FOREACH(ncp, ncpp, nc_hash) { 371 count++; 372 } 373 if (count) 374 used++; 375 if (maxlength < count) 376 maxlength = count; 377 } 378 n_nchash = nchash + 1; 379 CACHE_RUNLOCK(); 380 pct = (used * 100) / (n_nchash / 100); 381 error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash)); 382 if (error) 383 return (error); 384 error = SYSCTL_OUT(req, &used, sizeof(used)); 385 if (error) 386 return (error); 387 error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength)); 388 if (error) 389 return (error); 390 error = SYSCTL_OUT(req, &pct, sizeof(pct)); 391 if (error) 392 return (error); 393 return (0); 394 } 395 SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD| 396 CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I", 397 "nchash statistics (number of total/used buckets, maximum chain length, usage percentage)"); 398 #endif 399 400 /* 401 * cache_zap(): 402 * 403 * Removes a namecache entry from cache, whether it contains an actual 404 * pointer to a vnode or if it is just a negative cache entry. 405 */ 406 static void 407 cache_zap(ncp) 408 struct namecache *ncp; 409 { 410 struct vnode *vp; 411 412 rw_assert(&cache_lock, RA_WLOCKED); 413 CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp); 414 #ifdef KDTRACE_HOOKS 415 if (ncp->nc_vp != NULL) { 416 SDT_PROBE(vfs, namecache, zap, done, ncp->nc_dvp, 417 nc_get_name(ncp), ncp->nc_vp, 0, 0); 418 } else { 419 SDT_PROBE(vfs, namecache, zap_negative, done, ncp->nc_dvp, 420 nc_get_name(ncp), 0, 0, 0); 421 } 422 #endif 423 vp = NULL; 424 LIST_REMOVE(ncp, nc_hash); 425 if (ncp->nc_flag & NCF_ISDOTDOT) { 426 if (ncp == ncp->nc_dvp->v_cache_dd) 427 ncp->nc_dvp->v_cache_dd = NULL; 428 } else { 429 LIST_REMOVE(ncp, nc_src); 430 if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) { 431 vp = ncp->nc_dvp; 432 numcachehv--; 433 } 434 } 435 if (ncp->nc_vp) { 436 TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst); 437 if (ncp == ncp->nc_vp->v_cache_dd) 438 ncp->nc_vp->v_cache_dd = NULL; 439 } else { 440 TAILQ_REMOVE(&ncneg, ncp, nc_dst); 441 numneg--; 442 } 443 numcache--; 444 cache_free(ncp); 445 if (vp) 446 vdrop(vp); 447 } 448 449 /* 450 * Lookup an entry in the cache 451 * 452 * Lookup is called with dvp pointing to the directory to search, 453 * cnp pointing to the name of the entry being sought. If the lookup 454 * succeeds, the vnode is returned in *vpp, and a status of -1 is 455 * returned. If the lookup determines that the name does not exist 456 * (negative cacheing), a status of ENOENT is returned. If the lookup 457 * fails, a status of zero is returned. If the directory vnode is 458 * recycled out from under us due to a forced unmount, a status of 459 * ENOENT is returned. 460 * 461 * vpp is locked and ref'd on return. If we're looking up DOTDOT, dvp is 462 * unlocked. If we're looking up . an extra ref is taken, but the lock is 463 * not recursively acquired. 464 */ 465 466 int 467 cache_lookup(dvp, vpp, cnp, tsp, ticksp) 468 struct vnode *dvp; 469 struct vnode **vpp; 470 struct componentname *cnp; 471 struct timespec *tsp; 472 int *ticksp; 473 { 474 struct namecache *ncp; 475 uint32_t hash; 476 int error, ltype, wlocked; 477 478 if (!doingcache) { 479 cnp->cn_flags &= ~MAKEENTRY; 480 return (0); 481 } 482 retry: 483 CACHE_RLOCK(); 484 wlocked = 0; 485 numcalls++; 486 error = 0; 487 488 retry_wlocked: 489 if (cnp->cn_nameptr[0] == '.') { 490 if (cnp->cn_namelen == 1) { 491 *vpp = dvp; 492 CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .", 493 dvp, cnp->cn_nameptr); 494 dothits++; 495 SDT_PROBE(vfs, namecache, lookup, hit, dvp, ".", 496 *vpp, 0, 0); 497 if (tsp != NULL) 498 timespecclear(tsp); 499 if (ticksp != NULL) 500 *ticksp = ticks; 501 goto success; 502 } 503 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') { 504 dotdothits++; 505 if (dvp->v_cache_dd == NULL) { 506 SDT_PROBE(vfs, namecache, lookup, miss, dvp, 507 "..", NULL, 0, 0); 508 goto unlock; 509 } 510 if ((cnp->cn_flags & MAKEENTRY) == 0) { 511 if (!wlocked && !CACHE_UPGRADE_LOCK()) 512 goto wlock; 513 if (dvp->v_cache_dd->nc_flag & NCF_ISDOTDOT) 514 cache_zap(dvp->v_cache_dd); 515 dvp->v_cache_dd = NULL; 516 CACHE_WUNLOCK(); 517 return (0); 518 } 519 ncp = dvp->v_cache_dd; 520 if (ncp->nc_flag & NCF_ISDOTDOT) 521 *vpp = ncp->nc_vp; 522 else 523 *vpp = ncp->nc_dvp; 524 /* Return failure if negative entry was found. */ 525 if (*vpp == NULL) 526 goto negative_success; 527 CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..", 528 dvp, cnp->cn_nameptr, *vpp); 529 SDT_PROBE(vfs, namecache, lookup, hit, dvp, "..", 530 *vpp, 0, 0); 531 cache_out_ts(ncp, tsp, ticksp); 532 if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) == 533 NCF_DTS && tsp != NULL) 534 *tsp = ((struct namecache_ts *)ncp)-> 535 nc_dotdottime; 536 goto success; 537 } 538 } 539 540 hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT); 541 hash = fnv_32_buf(&dvp, sizeof(dvp), hash); 542 LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { 543 numchecks++; 544 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen && 545 !bcmp(nc_get_name(ncp), cnp->cn_nameptr, ncp->nc_nlen)) 546 break; 547 } 548 549 /* We failed to find an entry */ 550 if (ncp == NULL) { 551 SDT_PROBE(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr, 552 NULL, 0, 0); 553 if ((cnp->cn_flags & MAKEENTRY) == 0) { 554 nummisszap++; 555 } else { 556 nummiss++; 557 } 558 nchstats.ncs_miss++; 559 goto unlock; 560 } 561 562 /* We don't want to have an entry, so dump it */ 563 if ((cnp->cn_flags & MAKEENTRY) == 0) { 564 numposzaps++; 565 nchstats.ncs_badhits++; 566 if (!wlocked && !CACHE_UPGRADE_LOCK()) 567 goto wlock; 568 cache_zap(ncp); 569 CACHE_WUNLOCK(); 570 return (0); 571 } 572 573 /* We found a "positive" match, return the vnode */ 574 if (ncp->nc_vp) { 575 numposhits++; 576 nchstats.ncs_goodhits++; 577 *vpp = ncp->nc_vp; 578 CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p", 579 dvp, cnp->cn_nameptr, *vpp, ncp); 580 SDT_PROBE(vfs, namecache, lookup, hit, dvp, nc_get_name(ncp), 581 *vpp, 0, 0); 582 cache_out_ts(ncp, tsp, ticksp); 583 goto success; 584 } 585 586 negative_success: 587 /* We found a negative match, and want to create it, so purge */ 588 if (cnp->cn_nameiop == CREATE) { 589 numnegzaps++; 590 nchstats.ncs_badhits++; 591 if (!wlocked && !CACHE_UPGRADE_LOCK()) 592 goto wlock; 593 cache_zap(ncp); 594 CACHE_WUNLOCK(); 595 return (0); 596 } 597 598 if (!wlocked && !CACHE_UPGRADE_LOCK()) 599 goto wlock; 600 numneghits++; 601 /* 602 * We found a "negative" match, so we shift it to the end of 603 * the "negative" cache entries queue to satisfy LRU. Also, 604 * check to see if the entry is a whiteout; indicate this to 605 * the componentname, if so. 606 */ 607 TAILQ_REMOVE(&ncneg, ncp, nc_dst); 608 TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst); 609 nchstats.ncs_neghits++; 610 if (ncp->nc_flag & NCF_WHITE) 611 cnp->cn_flags |= ISWHITEOUT; 612 SDT_PROBE(vfs, namecache, lookup, hit__negative, dvp, nc_get_name(ncp), 613 0, 0, 0); 614 cache_out_ts(ncp, tsp, ticksp); 615 CACHE_WUNLOCK(); 616 return (ENOENT); 617 618 wlock: 619 /* 620 * We need to update the cache after our lookup, so upgrade to 621 * a write lock and retry the operation. 622 */ 623 CACHE_RUNLOCK(); 624 CACHE_WLOCK(); 625 numupgrades++; 626 wlocked = 1; 627 goto retry_wlocked; 628 629 success: 630 /* 631 * On success we return a locked and ref'd vnode as per the lookup 632 * protocol. 633 */ 634 if (dvp == *vpp) { /* lookup on "." */ 635 VREF(*vpp); 636 if (wlocked) 637 CACHE_WUNLOCK(); 638 else 639 CACHE_RUNLOCK(); 640 /* 641 * When we lookup "." we still can be asked to lock it 642 * differently... 643 */ 644 ltype = cnp->cn_lkflags & LK_TYPE_MASK; 645 if (ltype != VOP_ISLOCKED(*vpp)) { 646 if (ltype == LK_EXCLUSIVE) { 647 vn_lock(*vpp, LK_UPGRADE | LK_RETRY); 648 if ((*vpp)->v_iflag & VI_DOOMED) { 649 /* forced unmount */ 650 vrele(*vpp); 651 *vpp = NULL; 652 return (ENOENT); 653 } 654 } else 655 vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY); 656 } 657 return (-1); 658 } 659 ltype = 0; /* silence gcc warning */ 660 if (cnp->cn_flags & ISDOTDOT) { 661 ltype = VOP_ISLOCKED(dvp); 662 VOP_UNLOCK(dvp, 0); 663 } 664 VI_LOCK(*vpp); 665 if (wlocked) 666 CACHE_WUNLOCK(); 667 else 668 CACHE_RUNLOCK(); 669 error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, cnp->cn_thread); 670 if (cnp->cn_flags & ISDOTDOT) { 671 vn_lock(dvp, ltype | LK_RETRY); 672 if (dvp->v_iflag & VI_DOOMED) { 673 if (error == 0) 674 vput(*vpp); 675 *vpp = NULL; 676 return (ENOENT); 677 } 678 } 679 if (error) { 680 *vpp = NULL; 681 goto retry; 682 } 683 if ((cnp->cn_flags & ISLASTCN) && 684 (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) { 685 ASSERT_VOP_ELOCKED(*vpp, "cache_lookup"); 686 } 687 return (-1); 688 689 unlock: 690 if (wlocked) 691 CACHE_WUNLOCK(); 692 else 693 CACHE_RUNLOCK(); 694 return (0); 695 } 696 697 /* 698 * Add an entry to the cache. 699 */ 700 void 701 cache_enter_time(dvp, vp, cnp, tsp, dtsp) 702 struct vnode *dvp; 703 struct vnode *vp; 704 struct componentname *cnp; 705 struct timespec *tsp; 706 struct timespec *dtsp; 707 { 708 struct namecache *ncp, *n2; 709 struct namecache_ts *n3; 710 struct nchashhead *ncpp; 711 uint32_t hash; 712 int flag; 713 int hold; 714 int zap; 715 int len; 716 717 CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr); 718 VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp, 719 ("cache_enter: Adding a doomed vnode")); 720 VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp, 721 ("cache_enter: Doomed vnode used as src")); 722 723 if (!doingcache) 724 return; 725 726 /* 727 * Avoid blowout in namecache entries. 728 */ 729 if (numcache >= desiredvnodes * ncsizefactor) 730 return; 731 732 flag = 0; 733 if (cnp->cn_nameptr[0] == '.') { 734 if (cnp->cn_namelen == 1) 735 return; 736 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') { 737 CACHE_WLOCK(); 738 /* 739 * If dotdot entry already exists, just retarget it 740 * to new parent vnode, otherwise continue with new 741 * namecache entry allocation. 742 */ 743 if ((ncp = dvp->v_cache_dd) != NULL && 744 ncp->nc_flag & NCF_ISDOTDOT) { 745 KASSERT(ncp->nc_dvp == dvp, 746 ("wrong isdotdot parent")); 747 if (ncp->nc_vp != NULL) { 748 TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, 749 ncp, nc_dst); 750 } else { 751 TAILQ_REMOVE(&ncneg, ncp, nc_dst); 752 numneg--; 753 } 754 if (vp != NULL) { 755 TAILQ_INSERT_HEAD(&vp->v_cache_dst, 756 ncp, nc_dst); 757 } else { 758 TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst); 759 numneg++; 760 } 761 ncp->nc_vp = vp; 762 CACHE_WUNLOCK(); 763 return; 764 } 765 dvp->v_cache_dd = NULL; 766 SDT_PROBE(vfs, namecache, enter, done, dvp, "..", vp, 767 0, 0); 768 CACHE_WUNLOCK(); 769 flag = NCF_ISDOTDOT; 770 } 771 } 772 773 hold = 0; 774 zap = 0; 775 776 /* 777 * Calculate the hash key and setup as much of the new 778 * namecache entry as possible before acquiring the lock. 779 */ 780 ncp = cache_alloc(cnp->cn_namelen, tsp != NULL); 781 ncp->nc_vp = vp; 782 ncp->nc_dvp = dvp; 783 ncp->nc_flag = flag; 784 if (tsp != NULL) { 785 n3 = (struct namecache_ts *)ncp; 786 n3->nc_time = *tsp; 787 n3->nc_ticks = ticks; 788 n3->nc_flag |= NCF_TS; 789 if (dtsp != NULL) { 790 n3->nc_dotdottime = *dtsp; 791 n3->nc_flag |= NCF_DTS; 792 } 793 } 794 len = ncp->nc_nlen = cnp->cn_namelen; 795 hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT); 796 strlcpy(nc_get_name(ncp), cnp->cn_nameptr, len + 1); 797 hash = fnv_32_buf(&dvp, sizeof(dvp), hash); 798 CACHE_WLOCK(); 799 800 /* 801 * See if this vnode or negative entry is already in the cache 802 * with this name. This can happen with concurrent lookups of 803 * the same path name. 804 */ 805 ncpp = NCHHASH(hash); 806 LIST_FOREACH(n2, ncpp, nc_hash) { 807 if (n2->nc_dvp == dvp && 808 n2->nc_nlen == cnp->cn_namelen && 809 !bcmp(nc_get_name(n2), cnp->cn_nameptr, n2->nc_nlen)) { 810 if (tsp != NULL) { 811 KASSERT((n2->nc_flag & NCF_TS) != 0, 812 ("no NCF_TS")); 813 n3 = (struct namecache_ts *)n2; 814 n3->nc_time = 815 ((struct namecache_ts *)ncp)->nc_time; 816 n3->nc_ticks = 817 ((struct namecache_ts *)ncp)->nc_ticks; 818 if (dtsp != NULL) { 819 n3->nc_dotdottime = 820 ((struct namecache_ts *)ncp)-> 821 nc_dotdottime; 822 n3->nc_flag |= NCF_DTS; 823 } 824 } 825 CACHE_WUNLOCK(); 826 cache_free(ncp); 827 return; 828 } 829 } 830 831 if (flag == NCF_ISDOTDOT) { 832 /* 833 * See if we are trying to add .. entry, but some other lookup 834 * has populated v_cache_dd pointer already. 835 */ 836 if (dvp->v_cache_dd != NULL) { 837 CACHE_WUNLOCK(); 838 cache_free(ncp); 839 return; 840 } 841 KASSERT(vp == NULL || vp->v_type == VDIR, 842 ("wrong vnode type %p", vp)); 843 dvp->v_cache_dd = ncp; 844 } 845 846 numcache++; 847 if (!vp) { 848 numneg++; 849 if (cnp->cn_flags & ISWHITEOUT) 850 ncp->nc_flag |= NCF_WHITE; 851 } else if (vp->v_type == VDIR) { 852 if (flag != NCF_ISDOTDOT) { 853 /* 854 * For this case, the cache entry maps both the 855 * directory name in it and the name ".." for the 856 * directory's parent. 857 */ 858 if ((n2 = vp->v_cache_dd) != NULL && 859 (n2->nc_flag & NCF_ISDOTDOT) != 0) 860 cache_zap(n2); 861 vp->v_cache_dd = ncp; 862 } 863 } else { 864 vp->v_cache_dd = NULL; 865 } 866 867 /* 868 * Insert the new namecache entry into the appropriate chain 869 * within the cache entries table. 870 */ 871 LIST_INSERT_HEAD(ncpp, ncp, nc_hash); 872 if (flag != NCF_ISDOTDOT) { 873 if (LIST_EMPTY(&dvp->v_cache_src)) { 874 hold = 1; 875 numcachehv++; 876 } 877 LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src); 878 } 879 880 /* 881 * If the entry is "negative", we place it into the 882 * "negative" cache queue, otherwise, we place it into the 883 * destination vnode's cache entries queue. 884 */ 885 if (vp) { 886 TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst); 887 SDT_PROBE(vfs, namecache, enter, done, dvp, nc_get_name(ncp), 888 vp, 0, 0); 889 } else { 890 TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst); 891 SDT_PROBE(vfs, namecache, enter_negative, done, dvp, 892 nc_get_name(ncp), 0, 0, 0); 893 } 894 if (numneg * ncnegfactor > numcache) { 895 ncp = TAILQ_FIRST(&ncneg); 896 KASSERT(ncp->nc_vp == NULL, ("ncp %p vp %p on ncneg", 897 ncp, ncp->nc_vp)); 898 zap = 1; 899 } 900 if (hold) 901 vhold(dvp); 902 if (zap) 903 cache_zap(ncp); 904 CACHE_WUNLOCK(); 905 } 906 907 /* 908 * Name cache initialization, from vfs_init() when we are booting 909 */ 910 static void 911 nchinit(void *dummy __unused) 912 { 913 914 TAILQ_INIT(&ncneg); 915 916 cache_zone_small = uma_zcreate("S VFS Cache", 917 sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1, 918 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 919 cache_zone_small_ts = uma_zcreate("STS VFS Cache", 920 sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1, 921 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 922 cache_zone_large = uma_zcreate("L VFS Cache", 923 sizeof(struct namecache) + NAME_MAX + 1, 924 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 925 cache_zone_large_ts = uma_zcreate("LTS VFS Cache", 926 sizeof(struct namecache_ts) + NAME_MAX + 1, 927 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 928 929 nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash); 930 } 931 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL); 932 933 934 /* 935 * Invalidate all entries to a particular vnode. 936 */ 937 void 938 cache_purge(vp) 939 struct vnode *vp; 940 { 941 942 CTR1(KTR_VFS, "cache_purge(%p)", vp); 943 SDT_PROBE(vfs, namecache, purge, done, vp, 0, 0, 0, 0); 944 CACHE_WLOCK(); 945 while (!LIST_EMPTY(&vp->v_cache_src)) 946 cache_zap(LIST_FIRST(&vp->v_cache_src)); 947 while (!TAILQ_EMPTY(&vp->v_cache_dst)) 948 cache_zap(TAILQ_FIRST(&vp->v_cache_dst)); 949 if (vp->v_cache_dd != NULL) { 950 KASSERT(vp->v_cache_dd->nc_flag & NCF_ISDOTDOT, 951 ("lost dotdot link")); 952 cache_zap(vp->v_cache_dd); 953 } 954 KASSERT(vp->v_cache_dd == NULL, ("incomplete purge")); 955 CACHE_WUNLOCK(); 956 } 957 958 /* 959 * Invalidate all negative entries for a particular directory vnode. 960 */ 961 void 962 cache_purge_negative(vp) 963 struct vnode *vp; 964 { 965 struct namecache *cp, *ncp; 966 967 CTR1(KTR_VFS, "cache_purge_negative(%p)", vp); 968 SDT_PROBE(vfs, namecache, purge_negative, done, vp, 0, 0, 0, 0); 969 CACHE_WLOCK(); 970 LIST_FOREACH_SAFE(cp, &vp->v_cache_src, nc_src, ncp) { 971 if (cp->nc_vp == NULL) 972 cache_zap(cp); 973 } 974 CACHE_WUNLOCK(); 975 } 976 977 /* 978 * Flush all entries referencing a particular filesystem. 979 */ 980 void 981 cache_purgevfs(mp) 982 struct mount *mp; 983 { 984 struct nchashhead *ncpp; 985 struct namecache *ncp, *nnp; 986 987 /* Scan hash tables for applicable entries */ 988 SDT_PROBE(vfs, namecache, purgevfs, done, mp, 0, 0, 0, 0); 989 CACHE_WLOCK(); 990 for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) { 991 LIST_FOREACH_SAFE(ncp, ncpp, nc_hash, nnp) { 992 if (ncp->nc_dvp->v_mount == mp) 993 cache_zap(ncp); 994 } 995 } 996 CACHE_WUNLOCK(); 997 } 998 999 /* 1000 * Perform canonical checks and cache lookup and pass on to filesystem 1001 * through the vop_cachedlookup only if needed. 1002 */ 1003 1004 int 1005 vfs_cache_lookup(ap) 1006 struct vop_lookup_args /* { 1007 struct vnode *a_dvp; 1008 struct vnode **a_vpp; 1009 struct componentname *a_cnp; 1010 } */ *ap; 1011 { 1012 struct vnode *dvp; 1013 int error; 1014 struct vnode **vpp = ap->a_vpp; 1015 struct componentname *cnp = ap->a_cnp; 1016 struct ucred *cred = cnp->cn_cred; 1017 int flags = cnp->cn_flags; 1018 struct thread *td = cnp->cn_thread; 1019 1020 *vpp = NULL; 1021 dvp = ap->a_dvp; 1022 1023 if (dvp->v_type != VDIR) 1024 return (ENOTDIR); 1025 1026 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && 1027 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) 1028 return (EROFS); 1029 1030 error = VOP_ACCESS(dvp, VEXEC, cred, td); 1031 if (error) 1032 return (error); 1033 1034 error = cache_lookup(dvp, vpp, cnp, NULL, NULL); 1035 if (error == 0) 1036 return (VOP_CACHEDLOOKUP(dvp, vpp, cnp)); 1037 if (error == -1) 1038 return (0); 1039 return (error); 1040 } 1041 1042 /* 1043 * XXX All of these sysctls would probably be more productive dead. 1044 */ 1045 static int disablecwd; 1046 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, 1047 "Disable the getcwd syscall"); 1048 1049 /* Implementation of the getcwd syscall. */ 1050 int 1051 sys___getcwd(td, uap) 1052 struct thread *td; 1053 struct __getcwd_args *uap; 1054 { 1055 1056 return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen, 1057 MAXPATHLEN)); 1058 } 1059 1060 int 1061 kern___getcwd(struct thread *td, char *buf, enum uio_seg bufseg, u_int buflen, 1062 u_int path_max) 1063 { 1064 char *bp, *tmpbuf; 1065 struct filedesc *fdp; 1066 struct vnode *cdir, *rdir; 1067 int error; 1068 1069 if (disablecwd) 1070 return (ENODEV); 1071 if (buflen < 2) 1072 return (EINVAL); 1073 if (buflen > path_max) 1074 buflen = path_max; 1075 1076 tmpbuf = malloc(buflen, M_TEMP, M_WAITOK); 1077 fdp = td->td_proc->p_fd; 1078 FILEDESC_SLOCK(fdp); 1079 cdir = fdp->fd_cdir; 1080 VREF(cdir); 1081 rdir = fdp->fd_rdir; 1082 VREF(rdir); 1083 FILEDESC_SUNLOCK(fdp); 1084 error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen); 1085 vrele(rdir); 1086 vrele(cdir); 1087 1088 if (!error) { 1089 if (bufseg == UIO_SYSSPACE) 1090 bcopy(bp, buf, strlen(bp) + 1); 1091 else 1092 error = copyout(bp, buf, strlen(bp) + 1); 1093 #ifdef KTRACE 1094 if (KTRPOINT(curthread, KTR_NAMEI)) 1095 ktrnamei(bp); 1096 #endif 1097 } 1098 free(tmpbuf, M_TEMP); 1099 return (error); 1100 } 1101 1102 /* 1103 * Thus begins the fullpath magic. 1104 */ 1105 1106 #undef STATNODE 1107 #define STATNODE(name, descr) \ 1108 static u_int name; \ 1109 SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr) 1110 1111 static int disablefullpath; 1112 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0, 1113 "Disable the vn_fullpath function"); 1114 1115 /* These count for kern___getcwd(), too. */ 1116 STATNODE(numfullpathcalls, "Number of fullpath search calls"); 1117 STATNODE(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)"); 1118 STATNODE(numfullpathfail2, 1119 "Number of fullpath search errors (VOP_VPTOCNP failures)"); 1120 STATNODE(numfullpathfail4, "Number of fullpath search errors (ENOMEM)"); 1121 STATNODE(numfullpathfound, "Number of successful fullpath calls"); 1122 1123 /* 1124 * Retrieve the full filesystem path that correspond to a vnode from the name 1125 * cache (if available) 1126 */ 1127 int 1128 vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf) 1129 { 1130 char *buf; 1131 struct filedesc *fdp; 1132 struct vnode *rdir; 1133 int error; 1134 1135 if (disablefullpath) 1136 return (ENODEV); 1137 if (vn == NULL) 1138 return (EINVAL); 1139 1140 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); 1141 fdp = td->td_proc->p_fd; 1142 FILEDESC_SLOCK(fdp); 1143 rdir = fdp->fd_rdir; 1144 VREF(rdir); 1145 FILEDESC_SUNLOCK(fdp); 1146 error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN); 1147 vrele(rdir); 1148 1149 if (!error) 1150 *freebuf = buf; 1151 else 1152 free(buf, M_TEMP); 1153 return (error); 1154 } 1155 1156 /* 1157 * This function is similar to vn_fullpath, but it attempts to lookup the 1158 * pathname relative to the global root mount point. This is required for the 1159 * auditing sub-system, as audited pathnames must be absolute, relative to the 1160 * global root mount point. 1161 */ 1162 int 1163 vn_fullpath_global(struct thread *td, struct vnode *vn, 1164 char **retbuf, char **freebuf) 1165 { 1166 char *buf; 1167 int error; 1168 1169 if (disablefullpath) 1170 return (ENODEV); 1171 if (vn == NULL) 1172 return (EINVAL); 1173 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); 1174 error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN); 1175 if (!error) 1176 *freebuf = buf; 1177 else 1178 free(buf, M_TEMP); 1179 return (error); 1180 } 1181 1182 int 1183 vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen) 1184 { 1185 int error; 1186 1187 CACHE_RLOCK(); 1188 error = vn_vptocnp_locked(vp, cred, buf, buflen); 1189 if (error == 0) 1190 CACHE_RUNLOCK(); 1191 return (error); 1192 } 1193 1194 static int 1195 vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf, 1196 u_int *buflen) 1197 { 1198 struct vnode *dvp; 1199 struct namecache *ncp; 1200 int error; 1201 1202 TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) { 1203 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 1204 break; 1205 } 1206 if (ncp != NULL) { 1207 if (*buflen < ncp->nc_nlen) { 1208 CACHE_RUNLOCK(); 1209 vrele(*vp); 1210 numfullpathfail4++; 1211 error = ENOMEM; 1212 SDT_PROBE(vfs, namecache, fullpath, return, error, 1213 vp, NULL, 0, 0); 1214 return (error); 1215 } 1216 *buflen -= ncp->nc_nlen; 1217 memcpy(buf + *buflen, nc_get_name(ncp), ncp->nc_nlen); 1218 SDT_PROBE(vfs, namecache, fullpath, hit, ncp->nc_dvp, 1219 nc_get_name(ncp), vp, 0, 0); 1220 dvp = *vp; 1221 *vp = ncp->nc_dvp; 1222 vref(*vp); 1223 CACHE_RUNLOCK(); 1224 vrele(dvp); 1225 CACHE_RLOCK(); 1226 return (0); 1227 } 1228 SDT_PROBE(vfs, namecache, fullpath, miss, vp, 0, 0, 0, 0); 1229 1230 CACHE_RUNLOCK(); 1231 vn_lock(*vp, LK_SHARED | LK_RETRY); 1232 error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen); 1233 vput(*vp); 1234 if (error) { 1235 numfullpathfail2++; 1236 SDT_PROBE(vfs, namecache, fullpath, return, error, vp, 1237 NULL, 0, 0); 1238 return (error); 1239 } 1240 1241 *vp = dvp; 1242 CACHE_RLOCK(); 1243 if (dvp->v_iflag & VI_DOOMED) { 1244 /* forced unmount */ 1245 CACHE_RUNLOCK(); 1246 vrele(dvp); 1247 error = ENOENT; 1248 SDT_PROBE(vfs, namecache, fullpath, return, error, vp, 1249 NULL, 0, 0); 1250 return (error); 1251 } 1252 /* 1253 * *vp has its use count incremented still. 1254 */ 1255 1256 return (0); 1257 } 1258 1259 /* 1260 * The magic behind kern___getcwd() and vn_fullpath(). 1261 */ 1262 static int 1263 vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir, 1264 char *buf, char **retbuf, u_int buflen) 1265 { 1266 int error, slash_prefixed; 1267 #ifdef KDTRACE_HOOKS 1268 struct vnode *startvp = vp; 1269 #endif 1270 struct vnode *vp1; 1271 1272 buflen--; 1273 buf[buflen] = '\0'; 1274 error = 0; 1275 slash_prefixed = 0; 1276 1277 SDT_PROBE(vfs, namecache, fullpath, entry, vp, 0, 0, 0, 0); 1278 numfullpathcalls++; 1279 vref(vp); 1280 CACHE_RLOCK(); 1281 if (vp->v_type != VDIR) { 1282 error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen); 1283 if (error) 1284 return (error); 1285 if (buflen == 0) { 1286 CACHE_RUNLOCK(); 1287 vrele(vp); 1288 return (ENOMEM); 1289 } 1290 buf[--buflen] = '/'; 1291 slash_prefixed = 1; 1292 } 1293 while (vp != rdir && vp != rootvnode) { 1294 if (vp->v_vflag & VV_ROOT) { 1295 if (vp->v_iflag & VI_DOOMED) { /* forced unmount */ 1296 CACHE_RUNLOCK(); 1297 vrele(vp); 1298 error = ENOENT; 1299 SDT_PROBE(vfs, namecache, fullpath, return, 1300 error, vp, NULL, 0, 0); 1301 break; 1302 } 1303 vp1 = vp->v_mount->mnt_vnodecovered; 1304 vref(vp1); 1305 CACHE_RUNLOCK(); 1306 vrele(vp); 1307 vp = vp1; 1308 CACHE_RLOCK(); 1309 continue; 1310 } 1311 if (vp->v_type != VDIR) { 1312 CACHE_RUNLOCK(); 1313 vrele(vp); 1314 numfullpathfail1++; 1315 error = ENOTDIR; 1316 SDT_PROBE(vfs, namecache, fullpath, return, 1317 error, vp, NULL, 0, 0); 1318 break; 1319 } 1320 error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen); 1321 if (error) 1322 break; 1323 if (buflen == 0) { 1324 CACHE_RUNLOCK(); 1325 vrele(vp); 1326 error = ENOMEM; 1327 SDT_PROBE(vfs, namecache, fullpath, return, error, 1328 startvp, NULL, 0, 0); 1329 break; 1330 } 1331 buf[--buflen] = '/'; 1332 slash_prefixed = 1; 1333 } 1334 if (error) 1335 return (error); 1336 if (!slash_prefixed) { 1337 if (buflen == 0) { 1338 CACHE_RUNLOCK(); 1339 vrele(vp); 1340 numfullpathfail4++; 1341 SDT_PROBE(vfs, namecache, fullpath, return, ENOMEM, 1342 startvp, NULL, 0, 0); 1343 return (ENOMEM); 1344 } 1345 buf[--buflen] = '/'; 1346 } 1347 numfullpathfound++; 1348 CACHE_RUNLOCK(); 1349 vrele(vp); 1350 1351 SDT_PROBE(vfs, namecache, fullpath, return, 0, startvp, buf + buflen, 1352 0, 0); 1353 *retbuf = buf + buflen; 1354 return (0); 1355 } 1356 1357 struct vnode * 1358 vn_dir_dd_ino(struct vnode *vp) 1359 { 1360 struct namecache *ncp; 1361 struct vnode *ddvp; 1362 1363 ASSERT_VOP_LOCKED(vp, "vn_dir_dd_ino"); 1364 CACHE_RLOCK(); 1365 TAILQ_FOREACH(ncp, &(vp->v_cache_dst), nc_dst) { 1366 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) 1367 continue; 1368 ddvp = ncp->nc_dvp; 1369 VI_LOCK(ddvp); 1370 CACHE_RUNLOCK(); 1371 if (vget(ddvp, LK_INTERLOCK | LK_SHARED | LK_NOWAIT, curthread)) 1372 return (NULL); 1373 return (ddvp); 1374 } 1375 CACHE_RUNLOCK(); 1376 return (NULL); 1377 } 1378 1379 int 1380 vn_commname(struct vnode *vp, char *buf, u_int buflen) 1381 { 1382 struct namecache *ncp; 1383 int l; 1384 1385 CACHE_RLOCK(); 1386 TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst) 1387 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 1388 break; 1389 if (ncp == NULL) { 1390 CACHE_RUNLOCK(); 1391 return (ENOENT); 1392 } 1393 l = min(ncp->nc_nlen, buflen - 1); 1394 memcpy(buf, nc_get_name(ncp), l); 1395 CACHE_RUNLOCK(); 1396 buf[l] = '\0'; 1397 return (0); 1398 } 1399 1400 /* ABI compat shims for old kernel modules. */ 1401 #undef cache_enter 1402 1403 void cache_enter(struct vnode *dvp, struct vnode *vp, 1404 struct componentname *cnp); 1405 1406 void 1407 cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) 1408 { 1409 1410 cache_enter_time(dvp, vp, cnp, NULL, NULL); 1411 } 1412 1413 /* 1414 * This function updates path string to vnode's full global path 1415 * and checks the size of the new path string against the pathlen argument. 1416 * 1417 * Requires a locked, referenced vnode. 1418 * Vnode is re-locked on success or ENODEV, otherwise unlocked. 1419 * 1420 * If sysctl debug.disablefullpath is set, ENODEV is returned, 1421 * vnode is left locked and path remain untouched. 1422 * 1423 * If vp is a directory, the call to vn_fullpath_global() always succeeds 1424 * because it falls back to the ".." lookup if the namecache lookup fails. 1425 */ 1426 int 1427 vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path, 1428 u_int pathlen) 1429 { 1430 struct nameidata nd; 1431 struct vnode *vp1; 1432 char *rpath, *fbuf; 1433 int error; 1434 1435 ASSERT_VOP_ELOCKED(vp, __func__); 1436 1437 /* Return ENODEV if sysctl debug.disablefullpath==1 */ 1438 if (disablefullpath) 1439 return (ENODEV); 1440 1441 /* Construct global filesystem path from vp. */ 1442 VOP_UNLOCK(vp, 0); 1443 error = vn_fullpath_global(td, vp, &rpath, &fbuf); 1444 1445 if (error != 0) { 1446 vrele(vp); 1447 return (error); 1448 } 1449 1450 if (strlen(rpath) >= pathlen) { 1451 vrele(vp); 1452 error = ENAMETOOLONG; 1453 goto out; 1454 } 1455 1456 /* 1457 * Re-lookup the vnode by path to detect a possible rename. 1458 * As a side effect, the vnode is relocked. 1459 * If vnode was renamed, return ENOENT. 1460 */ 1461 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1, 1462 UIO_SYSSPACE, path, td); 1463 error = namei(&nd); 1464 if (error != 0) { 1465 vrele(vp); 1466 goto out; 1467 } 1468 NDFREE(&nd, NDF_ONLY_PNBUF); 1469 vp1 = nd.ni_vp; 1470 vrele(vp); 1471 if (vp1 == vp) 1472 strcpy(path, rpath); 1473 else { 1474 vput(vp1); 1475 error = ENOENT; 1476 } 1477 1478 out: 1479 free(fbuf, M_TEMP); 1480 return (error); 1481 } 1482