1 /*- 2 * Copyright (c) 1989, 1993, 1995 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Poul-Henning Kamp of the FreeBSD Project. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 4. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95 33 */ 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include "opt_kdtrace.h" 39 #include "opt_ktrace.h" 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/filedesc.h> 44 #include <sys/fnv_hash.h> 45 #include <sys/kernel.h> 46 #include <sys/lock.h> 47 #include <sys/malloc.h> 48 #include <sys/fcntl.h> 49 #include <sys/mount.h> 50 #include <sys/namei.h> 51 #include <sys/proc.h> 52 #include <sys/rwlock.h> 53 #include <sys/sdt.h> 54 #include <sys/syscallsubr.h> 55 #include <sys/sysctl.h> 56 #include <sys/sysproto.h> 57 #include <sys/vnode.h> 58 #ifdef KTRACE 59 #include <sys/ktrace.h> 60 #endif 61 62 #include <vm/uma.h> 63 64 SDT_PROVIDER_DECLARE(vfs); 65 SDT_PROBE_DEFINE3(vfs, namecache, enter, done, done, "struct vnode *", "char *", 66 "struct vnode *"); 67 SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, done, "struct vnode *", 68 "char *"); 69 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, entry, "struct vnode *"); 70 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, hit, "struct vnode *", 71 "struct char *", "struct vnode *"); 72 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, miss, "struct vnode *"); 73 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, return, "int", 74 "struct vnode *", "struct char *"); 75 SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, hit, "struct vnode *", "char *", 76 "struct vnode *"); 77 SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit_negative, hit-negative, 78 "struct vnode *", "char *"); 79 SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, miss, "struct vnode *", 80 "char *"); 81 SDT_PROBE_DEFINE1(vfs, namecache, purge, done, done, "struct vnode *"); 82 SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, done, "struct vnode *"); 83 SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, done, "struct mount *"); 84 SDT_PROBE_DEFINE3(vfs, namecache, zap, done, done, "struct vnode *", "char *", 85 "struct vnode *"); 86 SDT_PROBE_DEFINE2(vfs, namecache, zap_negative, done, done, "struct vnode *", 87 "char *"); 88 89 /* 90 * This structure describes the elements in the cache of recent 91 * names looked up by namei. 92 */ 93 94 struct namecache { 95 LIST_ENTRY(namecache) nc_hash; /* hash chain */ 96 LIST_ENTRY(namecache) nc_src; /* source vnode list */ 97 TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */ 98 struct vnode *nc_dvp; /* vnode of parent of name */ 99 struct vnode *nc_vp; /* vnode the name refers to */ 100 u_char nc_flag; /* flag bits */ 101 u_char nc_nlen; /* length of name */ 102 char nc_name[0]; /* segment name + nul */ 103 }; 104 105 /* 106 * struct namecache_ts repeats struct namecache layout up to the 107 * nc_nlen member. 108 * struct namecache_ts is used in place of struct namecache when time(s) need 109 * to be stored. The nc_dotdottime field is used when a cache entry is mapping 110 * both a non-dotdot directory name plus dotdot for the directory's 111 * parent. 112 */ 113 struct namecache_ts { 114 LIST_ENTRY(namecache) nc_hash; /* hash chain */ 115 LIST_ENTRY(namecache) nc_src; /* source vnode list */ 116 TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */ 117 struct vnode *nc_dvp; /* vnode of parent of name */ 118 struct vnode *nc_vp; /* vnode the name refers to */ 119 u_char nc_flag; /* flag bits */ 120 u_char nc_nlen; /* length of name */ 121 struct timespec nc_time; /* timespec provided by fs */ 122 struct timespec nc_dotdottime; /* dotdot timespec provided by fs */ 123 int nc_ticks; /* ticks value when entry was added */ 124 char nc_name[0]; /* segment name + nul */ 125 }; 126 127 /* 128 * Flags in namecache.nc_flag 129 */ 130 #define NCF_WHITE 0x01 131 #define NCF_ISDOTDOT 0x02 132 #define NCF_TS 0x04 133 #define NCF_DTS 0x08 134 135 /* 136 * Name caching works as follows: 137 * 138 * Names found by directory scans are retained in a cache 139 * for future reference. It is managed LRU, so frequently 140 * used names will hang around. Cache is indexed by hash value 141 * obtained from (vp, name) where vp refers to the directory 142 * containing name. 143 * 144 * If it is a "negative" entry, (i.e. for a name that is known NOT to 145 * exist) the vnode pointer will be NULL. 146 * 147 * Upon reaching the last segment of a path, if the reference 148 * is for DELETE, or NOCACHE is set (rewrite), and the 149 * name is located in the cache, it will be dropped. 150 */ 151 152 /* 153 * Structures associated with name cacheing. 154 */ 155 #define NCHHASH(hash) \ 156 (&nchashtbl[(hash) & nchash]) 157 static LIST_HEAD(nchashhead, namecache) *nchashtbl; /* Hash Table */ 158 static TAILQ_HEAD(, namecache) ncneg; /* Hash Table */ 159 static u_long nchash; /* size of hash table */ 160 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, 161 "Size of namecache hash table"); 162 static u_long ncnegfactor = 16; /* ratio of negative entries */ 163 SYSCTL_ULONG(_vfs, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, 164 "Ratio of negative namecache entries"); 165 static u_long numneg; /* number of negative entries allocated */ 166 SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, 167 "Number of negative entries in namecache"); 168 static u_long numcache; /* number of cache entries allocated */ 169 SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, 170 "Number of namecache entries"); 171 static u_long numcachehv; /* number of cache entries with vnodes held */ 172 SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0, 173 "Number of namecache entries with vnodes held"); 174 static u_int ncsizefactor = 2; 175 SYSCTL_UINT(_vfs, OID_AUTO, ncsizefactor, CTLFLAG_RW, &ncsizefactor, 0, 176 "Size factor for namecache"); 177 178 struct nchstats nchstats; /* cache effectiveness statistics */ 179 180 static struct rwlock cache_lock; 181 RW_SYSINIT(vfscache, &cache_lock, "Name Cache"); 182 183 #define CACHE_UPGRADE_LOCK() rw_try_upgrade(&cache_lock) 184 #define CACHE_RLOCK() rw_rlock(&cache_lock) 185 #define CACHE_RUNLOCK() rw_runlock(&cache_lock) 186 #define CACHE_WLOCK() rw_wlock(&cache_lock) 187 #define CACHE_WUNLOCK() rw_wunlock(&cache_lock) 188 189 /* 190 * UMA zones for the VFS cache. 191 * 192 * The small cache is used for entries with short names, which are the 193 * most common. The large cache is used for entries which are too big to 194 * fit in the small cache. 195 */ 196 static uma_zone_t cache_zone_small; 197 static uma_zone_t cache_zone_small_ts; 198 static uma_zone_t cache_zone_large; 199 static uma_zone_t cache_zone_large_ts; 200 201 #define CACHE_PATH_CUTOFF 35 202 203 static struct namecache * 204 cache_alloc(int len, int ts) 205 { 206 207 if (len > CACHE_PATH_CUTOFF) { 208 if (ts) 209 return (uma_zalloc(cache_zone_large_ts, M_WAITOK)); 210 else 211 return (uma_zalloc(cache_zone_large, M_WAITOK)); 212 } 213 if (ts) 214 return (uma_zalloc(cache_zone_small_ts, M_WAITOK)); 215 else 216 return (uma_zalloc(cache_zone_small, M_WAITOK)); 217 } 218 219 static void 220 cache_free(struct namecache *ncp) 221 { 222 int ts; 223 224 if (ncp == NULL) 225 return; 226 ts = ncp->nc_flag & NCF_TS; 227 if (ncp->nc_nlen <= CACHE_PATH_CUTOFF) { 228 if (ts) 229 uma_zfree(cache_zone_small_ts, ncp); 230 else 231 uma_zfree(cache_zone_small, ncp); 232 } else if (ts) 233 uma_zfree(cache_zone_large_ts, ncp); 234 else 235 uma_zfree(cache_zone_large, ncp); 236 } 237 238 static char * 239 nc_get_name(struct namecache *ncp) 240 { 241 struct namecache_ts *ncp_ts; 242 243 if ((ncp->nc_flag & NCF_TS) == 0) 244 return (ncp->nc_name); 245 ncp_ts = (struct namecache_ts *)ncp; 246 return (ncp_ts->nc_name); 247 } 248 249 static void 250 cache_out_ts(struct namecache *ncp, struct timespec *tsp, int *ticksp) 251 { 252 253 KASSERT((ncp->nc_flag & NCF_TS) != 0 || 254 (tsp == NULL && ticksp == NULL), 255 ("No NCF_TS")); 256 257 if (tsp != NULL) 258 *tsp = ((struct namecache_ts *)ncp)->nc_time; 259 if (ticksp != NULL) 260 *ticksp = ((struct namecache_ts *)ncp)->nc_ticks; 261 } 262 263 static int doingcache = 1; /* 1 => enable the cache */ 264 SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, 265 "VFS namecache enabled"); 266 267 /* Export size information to userland */ 268 SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, 0, 269 sizeof(struct namecache), "sizeof(struct namecache)"); 270 271 /* 272 * The new name cache statistics 273 */ 274 static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, 275 "Name cache statistics"); 276 #define STATNODE(mode, name, var, descr) \ 277 SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, descr); 278 STATNODE(CTLFLAG_RD, numneg, &numneg, "Number of negative cache entries"); 279 STATNODE(CTLFLAG_RD, numcache, &numcache, "Number of cache entries"); 280 static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls, 281 "Number of cache lookups"); 282 static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits, 283 "Number of '.' hits"); 284 static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits, 285 "Number of '..' hits"); 286 static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks, 287 "Number of checks in lookup"); 288 static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss, 289 "Number of cache misses"); 290 static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap, 291 "Number of cache misses we do not want to cache"); 292 static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps, 293 "Number of cache hits (positive) we do not want to cache"); 294 static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits, 295 "Number of cache hits (positive)"); 296 static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps, 297 "Number of cache hits (negative) we do not want to cache"); 298 static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits, 299 "Number of cache hits (negative)"); 300 static u_long numupgrades; STATNODE(CTLFLAG_RD, numupgrades, &numupgrades, 301 "Number of updates of the cache after lookup (write lock + retry)"); 302 303 SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD | CTLFLAG_MPSAFE, 304 &nchstats, sizeof(nchstats), "LU", 305 "VFS cache effectiveness statistics"); 306 307 308 309 static void cache_zap(struct namecache *ncp); 310 static int vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf, 311 u_int *buflen); 312 static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir, 313 char *buf, char **retbuf, u_int buflen); 314 315 static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries"); 316 317 #ifdef DIAGNOSTIC 318 /* 319 * Grab an atomic snapshot of the name cache hash chain lengths 320 */ 321 static SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, 322 "hash table stats"); 323 324 static int 325 sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS) 326 { 327 int error; 328 struct nchashhead *ncpp; 329 struct namecache *ncp; 330 int n_nchash; 331 int count; 332 333 n_nchash = nchash + 1; /* nchash is max index, not count */ 334 if (!req->oldptr) 335 return SYSCTL_OUT(req, 0, n_nchash * sizeof(int)); 336 337 /* Scan hash tables for applicable entries */ 338 for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) { 339 CACHE_RLOCK(); 340 count = 0; 341 LIST_FOREACH(ncp, ncpp, nc_hash) { 342 count++; 343 } 344 CACHE_RUNLOCK(); 345 error = SYSCTL_OUT(req, &count, sizeof(count)); 346 if (error) 347 return (error); 348 } 349 return (0); 350 } 351 SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD| 352 CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int", 353 "nchash chain lengths"); 354 355 static int 356 sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS) 357 { 358 int error; 359 struct nchashhead *ncpp; 360 struct namecache *ncp; 361 int n_nchash; 362 int count, maxlength, used, pct; 363 364 if (!req->oldptr) 365 return SYSCTL_OUT(req, 0, 4 * sizeof(int)); 366 367 n_nchash = nchash + 1; /* nchash is max index, not count */ 368 used = 0; 369 maxlength = 0; 370 371 /* Scan hash tables for applicable entries */ 372 for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) { 373 count = 0; 374 CACHE_RLOCK(); 375 LIST_FOREACH(ncp, ncpp, nc_hash) { 376 count++; 377 } 378 CACHE_RUNLOCK(); 379 if (count) 380 used++; 381 if (maxlength < count) 382 maxlength = count; 383 } 384 n_nchash = nchash + 1; 385 pct = (used * 100) / (n_nchash / 100); 386 error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash)); 387 if (error) 388 return (error); 389 error = SYSCTL_OUT(req, &used, sizeof(used)); 390 if (error) 391 return (error); 392 error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength)); 393 if (error) 394 return (error); 395 error = SYSCTL_OUT(req, &pct, sizeof(pct)); 396 if (error) 397 return (error); 398 return (0); 399 } 400 SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD| 401 CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I", 402 "nchash statistics (number of total/used buckets, maximum chain length, usage percentage)"); 403 #endif 404 405 /* 406 * cache_zap(): 407 * 408 * Removes a namecache entry from cache, whether it contains an actual 409 * pointer to a vnode or if it is just a negative cache entry. 410 */ 411 static void 412 cache_zap(ncp) 413 struct namecache *ncp; 414 { 415 struct vnode *vp; 416 417 rw_assert(&cache_lock, RA_WLOCKED); 418 CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp); 419 #ifdef KDTRACE_HOOKS 420 if (ncp->nc_vp != NULL) { 421 SDT_PROBE(vfs, namecache, zap, done, ncp->nc_dvp, 422 nc_get_name(ncp), ncp->nc_vp, 0, 0); 423 } else { 424 SDT_PROBE(vfs, namecache, zap_negative, done, ncp->nc_dvp, 425 nc_get_name(ncp), 0, 0, 0); 426 } 427 #endif 428 vp = NULL; 429 LIST_REMOVE(ncp, nc_hash); 430 if (ncp->nc_flag & NCF_ISDOTDOT) { 431 if (ncp == ncp->nc_dvp->v_cache_dd) 432 ncp->nc_dvp->v_cache_dd = NULL; 433 } else { 434 LIST_REMOVE(ncp, nc_src); 435 if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) { 436 vp = ncp->nc_dvp; 437 numcachehv--; 438 } 439 } 440 if (ncp->nc_vp) { 441 TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst); 442 if (ncp == ncp->nc_vp->v_cache_dd) 443 ncp->nc_vp->v_cache_dd = NULL; 444 } else { 445 TAILQ_REMOVE(&ncneg, ncp, nc_dst); 446 numneg--; 447 } 448 numcache--; 449 cache_free(ncp); 450 if (vp) 451 vdrop(vp); 452 } 453 454 /* 455 * Lookup an entry in the cache 456 * 457 * Lookup is called with dvp pointing to the directory to search, 458 * cnp pointing to the name of the entry being sought. If the lookup 459 * succeeds, the vnode is returned in *vpp, and a status of -1 is 460 * returned. If the lookup determines that the name does not exist 461 * (negative cacheing), a status of ENOENT is returned. If the lookup 462 * fails, a status of zero is returned. If the directory vnode is 463 * recycled out from under us due to a forced unmount, a status of 464 * ENOENT is returned. 465 * 466 * vpp is locked and ref'd on return. If we're looking up DOTDOT, dvp is 467 * unlocked. If we're looking up . an extra ref is taken, but the lock is 468 * not recursively acquired. 469 */ 470 471 int 472 cache_lookup(dvp, vpp, cnp, tsp, ticksp) 473 struct vnode *dvp; 474 struct vnode **vpp; 475 struct componentname *cnp; 476 struct timespec *tsp; 477 int *ticksp; 478 { 479 struct namecache *ncp; 480 uint32_t hash; 481 int error, ltype, wlocked; 482 483 if (!doingcache) { 484 cnp->cn_flags &= ~MAKEENTRY; 485 return (0); 486 } 487 retry: 488 CACHE_RLOCK(); 489 wlocked = 0; 490 numcalls++; 491 error = 0; 492 493 retry_wlocked: 494 if (cnp->cn_nameptr[0] == '.') { 495 if (cnp->cn_namelen == 1) { 496 *vpp = dvp; 497 CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .", 498 dvp, cnp->cn_nameptr); 499 dothits++; 500 SDT_PROBE(vfs, namecache, lookup, hit, dvp, ".", 501 *vpp, 0, 0); 502 if (tsp != NULL) 503 timespecclear(tsp); 504 if (ticksp != NULL) 505 *ticksp = ticks; 506 goto success; 507 } 508 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') { 509 dotdothits++; 510 if (dvp->v_cache_dd == NULL) { 511 SDT_PROBE(vfs, namecache, lookup, miss, dvp, 512 "..", NULL, 0, 0); 513 goto unlock; 514 } 515 if ((cnp->cn_flags & MAKEENTRY) == 0) { 516 if (!wlocked && !CACHE_UPGRADE_LOCK()) 517 goto wlock; 518 if (dvp->v_cache_dd->nc_flag & NCF_ISDOTDOT) 519 cache_zap(dvp->v_cache_dd); 520 dvp->v_cache_dd = NULL; 521 CACHE_WUNLOCK(); 522 return (0); 523 } 524 ncp = dvp->v_cache_dd; 525 if (ncp->nc_flag & NCF_ISDOTDOT) 526 *vpp = ncp->nc_vp; 527 else 528 *vpp = ncp->nc_dvp; 529 /* Return failure if negative entry was found. */ 530 if (*vpp == NULL) 531 goto negative_success; 532 CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..", 533 dvp, cnp->cn_nameptr, *vpp); 534 SDT_PROBE(vfs, namecache, lookup, hit, dvp, "..", 535 *vpp, 0, 0); 536 cache_out_ts(ncp, tsp, ticksp); 537 if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) == 538 NCF_DTS && tsp != NULL) 539 *tsp = ((struct namecache_ts *)ncp)-> 540 nc_dotdottime; 541 goto success; 542 } 543 } 544 545 hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT); 546 hash = fnv_32_buf(&dvp, sizeof(dvp), hash); 547 LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { 548 numchecks++; 549 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen && 550 !bcmp(nc_get_name(ncp), cnp->cn_nameptr, ncp->nc_nlen)) 551 break; 552 } 553 554 /* We failed to find an entry */ 555 if (ncp == NULL) { 556 SDT_PROBE(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr, 557 NULL, 0, 0); 558 if ((cnp->cn_flags & MAKEENTRY) == 0) { 559 nummisszap++; 560 } else { 561 nummiss++; 562 } 563 nchstats.ncs_miss++; 564 goto unlock; 565 } 566 567 /* We don't want to have an entry, so dump it */ 568 if ((cnp->cn_flags & MAKEENTRY) == 0) { 569 numposzaps++; 570 nchstats.ncs_badhits++; 571 if (!wlocked && !CACHE_UPGRADE_LOCK()) 572 goto wlock; 573 cache_zap(ncp); 574 CACHE_WUNLOCK(); 575 return (0); 576 } 577 578 /* We found a "positive" match, return the vnode */ 579 if (ncp->nc_vp) { 580 numposhits++; 581 nchstats.ncs_goodhits++; 582 *vpp = ncp->nc_vp; 583 CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p", 584 dvp, cnp->cn_nameptr, *vpp, ncp); 585 SDT_PROBE(vfs, namecache, lookup, hit, dvp, nc_get_name(ncp), 586 *vpp, 0, 0); 587 cache_out_ts(ncp, tsp, ticksp); 588 goto success; 589 } 590 591 negative_success: 592 /* We found a negative match, and want to create it, so purge */ 593 if (cnp->cn_nameiop == CREATE) { 594 numnegzaps++; 595 nchstats.ncs_badhits++; 596 if (!wlocked && !CACHE_UPGRADE_LOCK()) 597 goto wlock; 598 cache_zap(ncp); 599 CACHE_WUNLOCK(); 600 return (0); 601 } 602 603 if (!wlocked && !CACHE_UPGRADE_LOCK()) 604 goto wlock; 605 numneghits++; 606 /* 607 * We found a "negative" match, so we shift it to the end of 608 * the "negative" cache entries queue to satisfy LRU. Also, 609 * check to see if the entry is a whiteout; indicate this to 610 * the componentname, if so. 611 */ 612 TAILQ_REMOVE(&ncneg, ncp, nc_dst); 613 TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst); 614 nchstats.ncs_neghits++; 615 if (ncp->nc_flag & NCF_WHITE) 616 cnp->cn_flags |= ISWHITEOUT; 617 SDT_PROBE(vfs, namecache, lookup, hit_negative, dvp, nc_get_name(ncp), 618 0, 0, 0); 619 cache_out_ts(ncp, tsp, ticksp); 620 CACHE_WUNLOCK(); 621 return (ENOENT); 622 623 wlock: 624 /* 625 * We need to update the cache after our lookup, so upgrade to 626 * a write lock and retry the operation. 627 */ 628 CACHE_RUNLOCK(); 629 CACHE_WLOCK(); 630 numupgrades++; 631 wlocked = 1; 632 goto retry_wlocked; 633 634 success: 635 /* 636 * On success we return a locked and ref'd vnode as per the lookup 637 * protocol. 638 */ 639 if (dvp == *vpp) { /* lookup on "." */ 640 VREF(*vpp); 641 if (wlocked) 642 CACHE_WUNLOCK(); 643 else 644 CACHE_RUNLOCK(); 645 /* 646 * When we lookup "." we still can be asked to lock it 647 * differently... 648 */ 649 ltype = cnp->cn_lkflags & LK_TYPE_MASK; 650 if (ltype != VOP_ISLOCKED(*vpp)) { 651 if (ltype == LK_EXCLUSIVE) { 652 vn_lock(*vpp, LK_UPGRADE | LK_RETRY); 653 if ((*vpp)->v_iflag & VI_DOOMED) { 654 /* forced unmount */ 655 vrele(*vpp); 656 *vpp = NULL; 657 return (ENOENT); 658 } 659 } else 660 vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY); 661 } 662 return (-1); 663 } 664 ltype = 0; /* silence gcc warning */ 665 if (cnp->cn_flags & ISDOTDOT) { 666 ltype = VOP_ISLOCKED(dvp); 667 VOP_UNLOCK(dvp, 0); 668 } 669 VI_LOCK(*vpp); 670 if (wlocked) 671 CACHE_WUNLOCK(); 672 else 673 CACHE_RUNLOCK(); 674 error = vget(*vpp, cnp->cn_lkflags | LK_INTERLOCK, cnp->cn_thread); 675 if (cnp->cn_flags & ISDOTDOT) { 676 vn_lock(dvp, ltype | LK_RETRY); 677 if (dvp->v_iflag & VI_DOOMED) { 678 if (error == 0) 679 vput(*vpp); 680 *vpp = NULL; 681 return (ENOENT); 682 } 683 } 684 if (error) { 685 *vpp = NULL; 686 goto retry; 687 } 688 if ((cnp->cn_flags & ISLASTCN) && 689 (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) { 690 ASSERT_VOP_ELOCKED(*vpp, "cache_lookup"); 691 } 692 return (-1); 693 694 unlock: 695 if (wlocked) 696 CACHE_WUNLOCK(); 697 else 698 CACHE_RUNLOCK(); 699 return (0); 700 } 701 702 /* 703 * Add an entry to the cache. 704 */ 705 void 706 cache_enter_time(dvp, vp, cnp, tsp, dtsp) 707 struct vnode *dvp; 708 struct vnode *vp; 709 struct componentname *cnp; 710 struct timespec *tsp; 711 struct timespec *dtsp; 712 { 713 struct namecache *ncp, *n2; 714 struct namecache_ts *n3; 715 struct nchashhead *ncpp; 716 uint32_t hash; 717 int flag; 718 int hold; 719 int zap; 720 int len; 721 722 CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr); 723 VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp, 724 ("cache_enter: Adding a doomed vnode")); 725 VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp, 726 ("cache_enter: Doomed vnode used as src")); 727 728 if (!doingcache) 729 return; 730 731 /* 732 * Avoid blowout in namecache entries. 733 */ 734 if (numcache >= desiredvnodes * ncsizefactor) 735 return; 736 737 flag = 0; 738 if (cnp->cn_nameptr[0] == '.') { 739 if (cnp->cn_namelen == 1) 740 return; 741 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') { 742 CACHE_WLOCK(); 743 /* 744 * If dotdot entry already exists, just retarget it 745 * to new parent vnode, otherwise continue with new 746 * namecache entry allocation. 747 */ 748 if ((ncp = dvp->v_cache_dd) != NULL && 749 ncp->nc_flag & NCF_ISDOTDOT) { 750 KASSERT(ncp->nc_dvp == dvp, 751 ("wrong isdotdot parent")); 752 if (ncp->nc_vp != NULL) 753 TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, 754 ncp, nc_dst); 755 else 756 TAILQ_REMOVE(&ncneg, ncp, nc_dst); 757 if (vp != NULL) 758 TAILQ_INSERT_HEAD(&vp->v_cache_dst, 759 ncp, nc_dst); 760 else 761 TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst); 762 ncp->nc_vp = vp; 763 CACHE_WUNLOCK(); 764 return; 765 } 766 dvp->v_cache_dd = NULL; 767 SDT_PROBE(vfs, namecache, enter, done, dvp, "..", vp, 768 0, 0); 769 CACHE_WUNLOCK(); 770 flag = NCF_ISDOTDOT; 771 } 772 } 773 774 hold = 0; 775 zap = 0; 776 777 /* 778 * Calculate the hash key and setup as much of the new 779 * namecache entry as possible before acquiring the lock. 780 */ 781 ncp = cache_alloc(cnp->cn_namelen, tsp != NULL); 782 ncp->nc_vp = vp; 783 ncp->nc_dvp = dvp; 784 ncp->nc_flag = flag; 785 if (tsp != NULL) { 786 n3 = (struct namecache_ts *)ncp; 787 n3->nc_time = *tsp; 788 n3->nc_ticks = ticks; 789 n3->nc_flag |= NCF_TS; 790 if (dtsp != NULL) { 791 n3->nc_dotdottime = *dtsp; 792 n3->nc_flag |= NCF_DTS; 793 } 794 } 795 len = ncp->nc_nlen = cnp->cn_namelen; 796 hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT); 797 strlcpy(nc_get_name(ncp), cnp->cn_nameptr, len + 1); 798 hash = fnv_32_buf(&dvp, sizeof(dvp), hash); 799 CACHE_WLOCK(); 800 801 /* 802 * See if this vnode or negative entry is already in the cache 803 * with this name. This can happen with concurrent lookups of 804 * the same path name. 805 */ 806 ncpp = NCHHASH(hash); 807 LIST_FOREACH(n2, ncpp, nc_hash) { 808 if (n2->nc_dvp == dvp && 809 n2->nc_nlen == cnp->cn_namelen && 810 !bcmp(nc_get_name(n2), cnp->cn_nameptr, n2->nc_nlen)) { 811 if (tsp != NULL) { 812 KASSERT((n2->nc_flag & NCF_TS) != 0, 813 ("no NCF_TS")); 814 n3 = (struct namecache_ts *)n2; 815 n3->nc_time = 816 ((struct namecache_ts *)ncp)->nc_time; 817 n3->nc_ticks = 818 ((struct namecache_ts *)ncp)->nc_ticks; 819 if (dtsp != NULL) { 820 n3->nc_dotdottime = 821 ((struct namecache_ts *)ncp)-> 822 nc_dotdottime; 823 n3->nc_flag |= NCF_DTS; 824 } 825 } 826 CACHE_WUNLOCK(); 827 cache_free(ncp); 828 return; 829 } 830 } 831 832 if (flag == NCF_ISDOTDOT) { 833 /* 834 * See if we are trying to add .. entry, but some other lookup 835 * has populated v_cache_dd pointer already. 836 */ 837 if (dvp->v_cache_dd != NULL) { 838 CACHE_WUNLOCK(); 839 cache_free(ncp); 840 return; 841 } 842 KASSERT(vp == NULL || vp->v_type == VDIR, 843 ("wrong vnode type %p", vp)); 844 dvp->v_cache_dd = ncp; 845 } 846 847 numcache++; 848 if (!vp) { 849 numneg++; 850 if (cnp->cn_flags & ISWHITEOUT) 851 ncp->nc_flag |= NCF_WHITE; 852 } else if (vp->v_type == VDIR) { 853 if (flag != NCF_ISDOTDOT) { 854 /* 855 * For this case, the cache entry maps both the 856 * directory name in it and the name ".." for the 857 * directory's parent. 858 */ 859 if ((n2 = vp->v_cache_dd) != NULL && 860 (n2->nc_flag & NCF_ISDOTDOT) != 0) 861 cache_zap(n2); 862 vp->v_cache_dd = ncp; 863 } 864 } else { 865 vp->v_cache_dd = NULL; 866 } 867 868 /* 869 * Insert the new namecache entry into the appropriate chain 870 * within the cache entries table. 871 */ 872 LIST_INSERT_HEAD(ncpp, ncp, nc_hash); 873 if (flag != NCF_ISDOTDOT) { 874 if (LIST_EMPTY(&dvp->v_cache_src)) { 875 hold = 1; 876 numcachehv++; 877 } 878 LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src); 879 } 880 881 /* 882 * If the entry is "negative", we place it into the 883 * "negative" cache queue, otherwise, we place it into the 884 * destination vnode's cache entries queue. 885 */ 886 if (vp) { 887 TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst); 888 SDT_PROBE(vfs, namecache, enter, done, dvp, nc_get_name(ncp), 889 vp, 0, 0); 890 } else { 891 TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst); 892 SDT_PROBE(vfs, namecache, enter_negative, done, dvp, 893 nc_get_name(ncp), 0, 0, 0); 894 } 895 if (numneg * ncnegfactor > numcache) { 896 ncp = TAILQ_FIRST(&ncneg); 897 zap = 1; 898 } 899 if (hold) 900 vhold(dvp); 901 if (zap) 902 cache_zap(ncp); 903 CACHE_WUNLOCK(); 904 } 905 906 /* 907 * Name cache initialization, from vfs_init() when we are booting 908 */ 909 static void 910 nchinit(void *dummy __unused) 911 { 912 913 TAILQ_INIT(&ncneg); 914 915 cache_zone_small = uma_zcreate("S VFS Cache", 916 sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1, 917 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 918 cache_zone_small_ts = uma_zcreate("STS VFS Cache", 919 sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1, 920 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 921 cache_zone_large = uma_zcreate("L VFS Cache", 922 sizeof(struct namecache) + NAME_MAX + 1, 923 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 924 cache_zone_large_ts = uma_zcreate("LTS VFS Cache", 925 sizeof(struct namecache_ts) + NAME_MAX + 1, 926 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 927 928 nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash); 929 } 930 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL); 931 932 933 /* 934 * Invalidate all entries to a particular vnode. 935 */ 936 void 937 cache_purge(vp) 938 struct vnode *vp; 939 { 940 941 CTR1(KTR_VFS, "cache_purge(%p)", vp); 942 SDT_PROBE(vfs, namecache, purge, done, vp, 0, 0, 0, 0); 943 CACHE_WLOCK(); 944 while (!LIST_EMPTY(&vp->v_cache_src)) 945 cache_zap(LIST_FIRST(&vp->v_cache_src)); 946 while (!TAILQ_EMPTY(&vp->v_cache_dst)) 947 cache_zap(TAILQ_FIRST(&vp->v_cache_dst)); 948 if (vp->v_cache_dd != NULL) { 949 KASSERT(vp->v_cache_dd->nc_flag & NCF_ISDOTDOT, 950 ("lost dotdot link")); 951 cache_zap(vp->v_cache_dd); 952 } 953 KASSERT(vp->v_cache_dd == NULL, ("incomplete purge")); 954 CACHE_WUNLOCK(); 955 } 956 957 /* 958 * Invalidate all negative entries for a particular directory vnode. 959 */ 960 void 961 cache_purge_negative(vp) 962 struct vnode *vp; 963 { 964 struct namecache *cp, *ncp; 965 966 CTR1(KTR_VFS, "cache_purge_negative(%p)", vp); 967 SDT_PROBE(vfs, namecache, purge_negative, done, vp, 0, 0, 0, 0); 968 CACHE_WLOCK(); 969 LIST_FOREACH_SAFE(cp, &vp->v_cache_src, nc_src, ncp) { 970 if (cp->nc_vp == NULL) 971 cache_zap(cp); 972 } 973 CACHE_WUNLOCK(); 974 } 975 976 /* 977 * Flush all entries referencing a particular filesystem. 978 */ 979 void 980 cache_purgevfs(mp) 981 struct mount *mp; 982 { 983 struct nchashhead *ncpp; 984 struct namecache *ncp, *nnp; 985 986 /* Scan hash tables for applicable entries */ 987 SDT_PROBE(vfs, namecache, purgevfs, done, mp, 0, 0, 0, 0); 988 CACHE_WLOCK(); 989 for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) { 990 LIST_FOREACH_SAFE(ncp, ncpp, nc_hash, nnp) { 991 if (ncp->nc_dvp->v_mount == mp) 992 cache_zap(ncp); 993 } 994 } 995 CACHE_WUNLOCK(); 996 } 997 998 /* 999 * Perform canonical checks and cache lookup and pass on to filesystem 1000 * through the vop_cachedlookup only if needed. 1001 */ 1002 1003 int 1004 vfs_cache_lookup(ap) 1005 struct vop_lookup_args /* { 1006 struct vnode *a_dvp; 1007 struct vnode **a_vpp; 1008 struct componentname *a_cnp; 1009 } */ *ap; 1010 { 1011 struct vnode *dvp; 1012 int error; 1013 struct vnode **vpp = ap->a_vpp; 1014 struct componentname *cnp = ap->a_cnp; 1015 struct ucred *cred = cnp->cn_cred; 1016 int flags = cnp->cn_flags; 1017 struct thread *td = cnp->cn_thread; 1018 1019 *vpp = NULL; 1020 dvp = ap->a_dvp; 1021 1022 if (dvp->v_type != VDIR) 1023 return (ENOTDIR); 1024 1025 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && 1026 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) 1027 return (EROFS); 1028 1029 error = VOP_ACCESS(dvp, VEXEC, cred, td); 1030 if (error) 1031 return (error); 1032 1033 error = cache_lookup(dvp, vpp, cnp, NULL, NULL); 1034 if (error == 0) 1035 return (VOP_CACHEDLOOKUP(dvp, vpp, cnp)); 1036 if (error == -1) 1037 return (0); 1038 return (error); 1039 } 1040 1041 1042 #ifndef _SYS_SYSPROTO_H_ 1043 struct __getcwd_args { 1044 u_char *buf; 1045 u_int buflen; 1046 }; 1047 #endif 1048 1049 /* 1050 * XXX All of these sysctls would probably be more productive dead. 1051 */ 1052 static int disablecwd; 1053 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, 1054 "Disable the getcwd syscall"); 1055 1056 /* Implementation of the getcwd syscall. */ 1057 int 1058 sys___getcwd(td, uap) 1059 struct thread *td; 1060 struct __getcwd_args *uap; 1061 { 1062 1063 return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen)); 1064 } 1065 1066 int 1067 kern___getcwd(struct thread *td, u_char *buf, enum uio_seg bufseg, u_int buflen) 1068 { 1069 char *bp, *tmpbuf; 1070 struct filedesc *fdp; 1071 struct vnode *cdir, *rdir; 1072 int error; 1073 1074 if (disablecwd) 1075 return (ENODEV); 1076 if (buflen < 2) 1077 return (EINVAL); 1078 if (buflen > MAXPATHLEN) 1079 buflen = MAXPATHLEN; 1080 1081 tmpbuf = malloc(buflen, M_TEMP, M_WAITOK); 1082 fdp = td->td_proc->p_fd; 1083 FILEDESC_SLOCK(fdp); 1084 cdir = fdp->fd_cdir; 1085 VREF(cdir); 1086 rdir = fdp->fd_rdir; 1087 VREF(rdir); 1088 FILEDESC_SUNLOCK(fdp); 1089 error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen); 1090 vrele(rdir); 1091 vrele(cdir); 1092 1093 if (!error) { 1094 if (bufseg == UIO_SYSSPACE) 1095 bcopy(bp, buf, strlen(bp) + 1); 1096 else 1097 error = copyout(bp, buf, strlen(bp) + 1); 1098 #ifdef KTRACE 1099 if (KTRPOINT(curthread, KTR_NAMEI)) 1100 ktrnamei(bp); 1101 #endif 1102 } 1103 free(tmpbuf, M_TEMP); 1104 return (error); 1105 } 1106 1107 /* 1108 * Thus begins the fullpath magic. 1109 */ 1110 1111 #undef STATNODE 1112 #define STATNODE(name, descr) \ 1113 static u_int name; \ 1114 SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr) 1115 1116 static int disablefullpath; 1117 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0, 1118 "Disable the vn_fullpath function"); 1119 1120 /* These count for kern___getcwd(), too. */ 1121 STATNODE(numfullpathcalls, "Number of fullpath search calls"); 1122 STATNODE(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)"); 1123 STATNODE(numfullpathfail2, 1124 "Number of fullpath search errors (VOP_VPTOCNP failures)"); 1125 STATNODE(numfullpathfail4, "Number of fullpath search errors (ENOMEM)"); 1126 STATNODE(numfullpathfound, "Number of successful fullpath calls"); 1127 1128 /* 1129 * Retrieve the full filesystem path that correspond to a vnode from the name 1130 * cache (if available) 1131 */ 1132 int 1133 vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf) 1134 { 1135 char *buf; 1136 struct filedesc *fdp; 1137 struct vnode *rdir; 1138 int error; 1139 1140 if (disablefullpath) 1141 return (ENODEV); 1142 if (vn == NULL) 1143 return (EINVAL); 1144 1145 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); 1146 fdp = td->td_proc->p_fd; 1147 FILEDESC_SLOCK(fdp); 1148 rdir = fdp->fd_rdir; 1149 VREF(rdir); 1150 FILEDESC_SUNLOCK(fdp); 1151 error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN); 1152 vrele(rdir); 1153 1154 if (!error) 1155 *freebuf = buf; 1156 else 1157 free(buf, M_TEMP); 1158 return (error); 1159 } 1160 1161 /* 1162 * This function is similar to vn_fullpath, but it attempts to lookup the 1163 * pathname relative to the global root mount point. This is required for the 1164 * auditing sub-system, as audited pathnames must be absolute, relative to the 1165 * global root mount point. 1166 */ 1167 int 1168 vn_fullpath_global(struct thread *td, struct vnode *vn, 1169 char **retbuf, char **freebuf) 1170 { 1171 char *buf; 1172 int error; 1173 1174 if (disablefullpath) 1175 return (ENODEV); 1176 if (vn == NULL) 1177 return (EINVAL); 1178 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); 1179 error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN); 1180 if (!error) 1181 *freebuf = buf; 1182 else 1183 free(buf, M_TEMP); 1184 return (error); 1185 } 1186 1187 int 1188 vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen) 1189 { 1190 int error; 1191 1192 CACHE_RLOCK(); 1193 error = vn_vptocnp_locked(vp, cred, buf, buflen); 1194 if (error == 0) 1195 CACHE_RUNLOCK(); 1196 return (error); 1197 } 1198 1199 static int 1200 vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf, 1201 u_int *buflen) 1202 { 1203 struct vnode *dvp; 1204 struct namecache *ncp; 1205 int error; 1206 1207 TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) { 1208 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 1209 break; 1210 } 1211 if (ncp != NULL) { 1212 if (*buflen < ncp->nc_nlen) { 1213 CACHE_RUNLOCK(); 1214 vrele(*vp); 1215 numfullpathfail4++; 1216 error = ENOMEM; 1217 SDT_PROBE(vfs, namecache, fullpath, return, error, 1218 vp, NULL, 0, 0); 1219 return (error); 1220 } 1221 *buflen -= ncp->nc_nlen; 1222 memcpy(buf + *buflen, nc_get_name(ncp), ncp->nc_nlen); 1223 SDT_PROBE(vfs, namecache, fullpath, hit, ncp->nc_dvp, 1224 nc_get_name(ncp), vp, 0, 0); 1225 dvp = *vp; 1226 *vp = ncp->nc_dvp; 1227 vref(*vp); 1228 CACHE_RUNLOCK(); 1229 vrele(dvp); 1230 CACHE_RLOCK(); 1231 return (0); 1232 } 1233 SDT_PROBE(vfs, namecache, fullpath, miss, vp, 0, 0, 0, 0); 1234 1235 CACHE_RUNLOCK(); 1236 vn_lock(*vp, LK_SHARED | LK_RETRY); 1237 error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen); 1238 vput(*vp); 1239 if (error) { 1240 numfullpathfail2++; 1241 SDT_PROBE(vfs, namecache, fullpath, return, error, vp, 1242 NULL, 0, 0); 1243 return (error); 1244 } 1245 1246 *vp = dvp; 1247 CACHE_RLOCK(); 1248 if (dvp->v_iflag & VI_DOOMED) { 1249 /* forced unmount */ 1250 CACHE_RUNLOCK(); 1251 vrele(dvp); 1252 error = ENOENT; 1253 SDT_PROBE(vfs, namecache, fullpath, return, error, vp, 1254 NULL, 0, 0); 1255 return (error); 1256 } 1257 /* 1258 * *vp has its use count incremented still. 1259 */ 1260 1261 return (0); 1262 } 1263 1264 /* 1265 * The magic behind kern___getcwd() and vn_fullpath(). 1266 */ 1267 static int 1268 vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir, 1269 char *buf, char **retbuf, u_int buflen) 1270 { 1271 int error, slash_prefixed; 1272 #ifdef KDTRACE_HOOKS 1273 struct vnode *startvp = vp; 1274 #endif 1275 struct vnode *vp1; 1276 1277 buflen--; 1278 buf[buflen] = '\0'; 1279 error = 0; 1280 slash_prefixed = 0; 1281 1282 SDT_PROBE(vfs, namecache, fullpath, entry, vp, 0, 0, 0, 0); 1283 numfullpathcalls++; 1284 vref(vp); 1285 CACHE_RLOCK(); 1286 if (vp->v_type != VDIR) { 1287 error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen); 1288 if (error) 1289 return (error); 1290 if (buflen == 0) { 1291 CACHE_RUNLOCK(); 1292 vrele(vp); 1293 return (ENOMEM); 1294 } 1295 buf[--buflen] = '/'; 1296 slash_prefixed = 1; 1297 } 1298 while (vp != rdir && vp != rootvnode) { 1299 if (vp->v_vflag & VV_ROOT) { 1300 if (vp->v_iflag & VI_DOOMED) { /* forced unmount */ 1301 CACHE_RUNLOCK(); 1302 vrele(vp); 1303 error = ENOENT; 1304 SDT_PROBE(vfs, namecache, fullpath, return, 1305 error, vp, NULL, 0, 0); 1306 break; 1307 } 1308 vp1 = vp->v_mount->mnt_vnodecovered; 1309 vref(vp1); 1310 CACHE_RUNLOCK(); 1311 vrele(vp); 1312 vp = vp1; 1313 CACHE_RLOCK(); 1314 continue; 1315 } 1316 if (vp->v_type != VDIR) { 1317 CACHE_RUNLOCK(); 1318 vrele(vp); 1319 numfullpathfail1++; 1320 error = ENOTDIR; 1321 SDT_PROBE(vfs, namecache, fullpath, return, 1322 error, vp, NULL, 0, 0); 1323 break; 1324 } 1325 error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen); 1326 if (error) 1327 break; 1328 if (buflen == 0) { 1329 CACHE_RUNLOCK(); 1330 vrele(vp); 1331 error = ENOMEM; 1332 SDT_PROBE(vfs, namecache, fullpath, return, error, 1333 startvp, NULL, 0, 0); 1334 break; 1335 } 1336 buf[--buflen] = '/'; 1337 slash_prefixed = 1; 1338 } 1339 if (error) 1340 return (error); 1341 if (!slash_prefixed) { 1342 if (buflen == 0) { 1343 CACHE_RUNLOCK(); 1344 vrele(vp); 1345 numfullpathfail4++; 1346 SDT_PROBE(vfs, namecache, fullpath, return, ENOMEM, 1347 startvp, NULL, 0, 0); 1348 return (ENOMEM); 1349 } 1350 buf[--buflen] = '/'; 1351 } 1352 numfullpathfound++; 1353 CACHE_RUNLOCK(); 1354 vrele(vp); 1355 1356 SDT_PROBE(vfs, namecache, fullpath, return, 0, startvp, buf + buflen, 1357 0, 0); 1358 *retbuf = buf + buflen; 1359 return (0); 1360 } 1361 1362 struct vnode * 1363 vn_dir_dd_ino(struct vnode *vp) 1364 { 1365 struct namecache *ncp; 1366 struct vnode *ddvp; 1367 1368 ASSERT_VOP_LOCKED(vp, "vn_dir_dd_ino"); 1369 CACHE_RLOCK(); 1370 TAILQ_FOREACH(ncp, &(vp->v_cache_dst), nc_dst) { 1371 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) 1372 continue; 1373 ddvp = ncp->nc_dvp; 1374 VI_LOCK(ddvp); 1375 CACHE_RUNLOCK(); 1376 if (vget(ddvp, LK_INTERLOCK | LK_SHARED | LK_NOWAIT, curthread)) 1377 return (NULL); 1378 return (ddvp); 1379 } 1380 CACHE_RUNLOCK(); 1381 return (NULL); 1382 } 1383 1384 int 1385 vn_commname(struct vnode *vp, char *buf, u_int buflen) 1386 { 1387 struct namecache *ncp; 1388 int l; 1389 1390 CACHE_RLOCK(); 1391 TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst) 1392 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 1393 break; 1394 if (ncp == NULL) { 1395 CACHE_RUNLOCK(); 1396 return (ENOENT); 1397 } 1398 l = min(ncp->nc_nlen, buflen - 1); 1399 memcpy(buf, nc_get_name(ncp), l); 1400 CACHE_RUNLOCK(); 1401 buf[l] = '\0'; 1402 return (0); 1403 } 1404 1405 /* ABI compat shims for old kernel modules. */ 1406 #undef cache_enter 1407 1408 void cache_enter(struct vnode *dvp, struct vnode *vp, 1409 struct componentname *cnp); 1410 1411 void 1412 cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) 1413 { 1414 1415 cache_enter_time(dvp, vp, cnp, NULL, NULL); 1416 } 1417 1418 /* 1419 * This function updates path string to vnode's full global path 1420 * and checks the size of the new path string against the pathlen argument. 1421 * 1422 * Requires a locked, referenced vnode and GIANT lock held. 1423 * Vnode is re-locked on success or ENODEV, otherwise unlocked. 1424 * 1425 * If sysctl debug.disablefullpath is set, ENODEV is returned, 1426 * vnode is left locked and path remain untouched. 1427 * 1428 * If vp is a directory, the call to vn_fullpath_global() always succeeds 1429 * because it falls back to the ".." lookup if the namecache lookup fails. 1430 */ 1431 int 1432 vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path, 1433 u_int pathlen) 1434 { 1435 struct nameidata nd; 1436 struct vnode *vp1; 1437 char *rpath, *fbuf; 1438 int error; 1439 1440 ASSERT_VOP_ELOCKED(vp, __func__); 1441 1442 /* Return ENODEV if sysctl debug.disablefullpath==1 */ 1443 if (disablefullpath) 1444 return (ENODEV); 1445 1446 /* Construct global filesystem path from vp. */ 1447 VOP_UNLOCK(vp, 0); 1448 error = vn_fullpath_global(td, vp, &rpath, &fbuf); 1449 1450 if (error != 0) { 1451 vrele(vp); 1452 return (error); 1453 } 1454 1455 if (strlen(rpath) >= pathlen) { 1456 vrele(vp); 1457 error = ENAMETOOLONG; 1458 goto out; 1459 } 1460 1461 /* 1462 * Re-lookup the vnode by path to detect a possible rename. 1463 * As a side effect, the vnode is relocked. 1464 * If vnode was renamed, return ENOENT. 1465 */ 1466 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1, 1467 UIO_SYSSPACE, path, td); 1468 error = namei(&nd); 1469 if (error != 0) { 1470 vrele(vp); 1471 goto out; 1472 } 1473 NDFREE(&nd, NDF_ONLY_PNBUF); 1474 vp1 = nd.ni_vp; 1475 vrele(vp); 1476 if (vp1 == vp) 1477 strcpy(path, rpath); 1478 else { 1479 vput(vp1); 1480 error = ENOENT; 1481 } 1482 1483 out: 1484 free(fbuf, M_TEMP); 1485 return (error); 1486 } 1487