1 /*- 2 * Copyright (c) 1989, 1993, 1995 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Poul-Henning Kamp of the FreeBSD Project. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 4. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 * @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95 33 */ 34 35 #include <sys/cdefs.h> 36 __FBSDID("$FreeBSD$"); 37 38 #include "opt_ktrace.h" 39 40 #include <sys/param.h> 41 #include <sys/systm.h> 42 #include <sys/filedesc.h> 43 #include <sys/fnv_hash.h> 44 #include <sys/kernel.h> 45 #include <sys/lock.h> 46 #include <sys/malloc.h> 47 #include <sys/fcntl.h> 48 #include <sys/mount.h> 49 #include <sys/namei.h> 50 #include <sys/proc.h> 51 #include <sys/rwlock.h> 52 #include <sys/sdt.h> 53 #include <sys/syscallsubr.h> 54 #include <sys/sysctl.h> 55 #include <sys/sysproto.h> 56 #include <sys/vnode.h> 57 #ifdef KTRACE 58 #include <sys/ktrace.h> 59 #endif 60 61 #include <vm/uma.h> 62 63 SDT_PROVIDER_DECLARE(vfs); 64 SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *", "char *", 65 "struct vnode *"); 66 SDT_PROBE_DEFINE2(vfs, namecache, enter_negative, done, "struct vnode *", 67 "char *"); 68 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, entry, "struct vnode *"); 69 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, hit, "struct vnode *", 70 "char *", "struct vnode *"); 71 SDT_PROBE_DEFINE1(vfs, namecache, fullpath, miss, "struct vnode *"); 72 SDT_PROBE_DEFINE3(vfs, namecache, fullpath, return, "int", 73 "struct vnode *", "char *"); 74 SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *", "char *", 75 "struct vnode *"); 76 SDT_PROBE_DEFINE2(vfs, namecache, lookup, hit__negative, 77 "struct vnode *", "char *"); 78 SDT_PROBE_DEFINE2(vfs, namecache, lookup, miss, "struct vnode *", 79 "char *"); 80 SDT_PROBE_DEFINE1(vfs, namecache, purge, done, "struct vnode *"); 81 SDT_PROBE_DEFINE1(vfs, namecache, purge_negative, done, "struct vnode *"); 82 SDT_PROBE_DEFINE1(vfs, namecache, purgevfs, done, "struct mount *"); 83 SDT_PROBE_DEFINE3(vfs, namecache, zap, done, "struct vnode *", "char *", 84 "struct vnode *"); 85 SDT_PROBE_DEFINE2(vfs, namecache, zap_negative, done, "struct vnode *", 86 "char *"); 87 88 /* 89 * This structure describes the elements in the cache of recent 90 * names looked up by namei. 91 */ 92 93 struct namecache { 94 LIST_ENTRY(namecache) nc_hash; /* hash chain */ 95 LIST_ENTRY(namecache) nc_src; /* source vnode list */ 96 TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */ 97 struct vnode *nc_dvp; /* vnode of parent of name */ 98 struct vnode *nc_vp; /* vnode the name refers to */ 99 u_char nc_flag; /* flag bits */ 100 u_char nc_nlen; /* length of name */ 101 char nc_name[0]; /* segment name + nul */ 102 }; 103 104 /* 105 * struct namecache_ts repeats struct namecache layout up to the 106 * nc_nlen member. 107 * struct namecache_ts is used in place of struct namecache when time(s) need 108 * to be stored. The nc_dotdottime field is used when a cache entry is mapping 109 * both a non-dotdot directory name plus dotdot for the directory's 110 * parent. 111 */ 112 struct namecache_ts { 113 LIST_ENTRY(namecache) nc_hash; /* hash chain */ 114 LIST_ENTRY(namecache) nc_src; /* source vnode list */ 115 TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */ 116 struct vnode *nc_dvp; /* vnode of parent of name */ 117 struct vnode *nc_vp; /* vnode the name refers to */ 118 u_char nc_flag; /* flag bits */ 119 u_char nc_nlen; /* length of name */ 120 struct timespec nc_time; /* timespec provided by fs */ 121 struct timespec nc_dotdottime; /* dotdot timespec provided by fs */ 122 int nc_ticks; /* ticks value when entry was added */ 123 char nc_name[0]; /* segment name + nul */ 124 }; 125 126 /* 127 * Flags in namecache.nc_flag 128 */ 129 #define NCF_WHITE 0x01 130 #define NCF_ISDOTDOT 0x02 131 #define NCF_TS 0x04 132 #define NCF_DTS 0x08 133 134 /* 135 * Name caching works as follows: 136 * 137 * Names found by directory scans are retained in a cache 138 * for future reference. It is managed LRU, so frequently 139 * used names will hang around. Cache is indexed by hash value 140 * obtained from (vp, name) where vp refers to the directory 141 * containing name. 142 * 143 * If it is a "negative" entry, (i.e. for a name that is known NOT to 144 * exist) the vnode pointer will be NULL. 145 * 146 * Upon reaching the last segment of a path, if the reference 147 * is for DELETE, or NOCACHE is set (rewrite), and the 148 * name is located in the cache, it will be dropped. 149 */ 150 151 /* 152 * Structures associated with name cacheing. 153 */ 154 #define NCHHASH(hash) \ 155 (&nchashtbl[(hash) & nchash]) 156 static LIST_HEAD(nchashhead, namecache) *nchashtbl; /* Hash Table */ 157 static TAILQ_HEAD(, namecache) ncneg; /* Hash Table */ 158 static u_long nchash; /* size of hash table */ 159 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, 160 "Size of namecache hash table"); 161 static u_long ncnegfactor = 16; /* ratio of negative entries */ 162 SYSCTL_ULONG(_vfs, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, 163 "Ratio of negative namecache entries"); 164 static u_long numneg; /* number of negative entries allocated */ 165 SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, 166 "Number of negative entries in namecache"); 167 static u_long numcache; /* number of cache entries allocated */ 168 SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, 169 "Number of namecache entries"); 170 static u_long numcachehv; /* number of cache entries with vnodes held */ 171 SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0, 172 "Number of namecache entries with vnodes held"); 173 static u_int ncsizefactor = 2; 174 SYSCTL_UINT(_vfs, OID_AUTO, ncsizefactor, CTLFLAG_RW, &ncsizefactor, 0, 175 "Size factor for namecache"); 176 177 struct nchstats nchstats; /* cache effectiveness statistics */ 178 179 static struct rwlock cache_lock; 180 RW_SYSINIT(vfscache, &cache_lock, "Name Cache"); 181 182 #define CACHE_UPGRADE_LOCK() rw_try_upgrade(&cache_lock) 183 #define CACHE_RLOCK() rw_rlock(&cache_lock) 184 #define CACHE_RUNLOCK() rw_runlock(&cache_lock) 185 #define CACHE_WLOCK() rw_wlock(&cache_lock) 186 #define CACHE_WUNLOCK() rw_wunlock(&cache_lock) 187 188 /* 189 * UMA zones for the VFS cache. 190 * 191 * The small cache is used for entries with short names, which are the 192 * most common. The large cache is used for entries which are too big to 193 * fit in the small cache. 194 */ 195 static uma_zone_t cache_zone_small; 196 static uma_zone_t cache_zone_small_ts; 197 static uma_zone_t cache_zone_large; 198 static uma_zone_t cache_zone_large_ts; 199 200 #define CACHE_PATH_CUTOFF 35 201 202 static struct namecache * 203 cache_alloc(int len, int ts) 204 { 205 206 if (len > CACHE_PATH_CUTOFF) { 207 if (ts) 208 return (uma_zalloc(cache_zone_large_ts, M_WAITOK)); 209 else 210 return (uma_zalloc(cache_zone_large, M_WAITOK)); 211 } 212 if (ts) 213 return (uma_zalloc(cache_zone_small_ts, M_WAITOK)); 214 else 215 return (uma_zalloc(cache_zone_small, M_WAITOK)); 216 } 217 218 static void 219 cache_free(struct namecache *ncp) 220 { 221 int ts; 222 223 if (ncp == NULL) 224 return; 225 ts = ncp->nc_flag & NCF_TS; 226 if (ncp->nc_nlen <= CACHE_PATH_CUTOFF) { 227 if (ts) 228 uma_zfree(cache_zone_small_ts, ncp); 229 else 230 uma_zfree(cache_zone_small, ncp); 231 } else if (ts) 232 uma_zfree(cache_zone_large_ts, ncp); 233 else 234 uma_zfree(cache_zone_large, ncp); 235 } 236 237 static char * 238 nc_get_name(struct namecache *ncp) 239 { 240 struct namecache_ts *ncp_ts; 241 242 if ((ncp->nc_flag & NCF_TS) == 0) 243 return (ncp->nc_name); 244 ncp_ts = (struct namecache_ts *)ncp; 245 return (ncp_ts->nc_name); 246 } 247 248 static void 249 cache_out_ts(struct namecache *ncp, struct timespec *tsp, int *ticksp) 250 { 251 252 KASSERT((ncp->nc_flag & NCF_TS) != 0 || 253 (tsp == NULL && ticksp == NULL), 254 ("No NCF_TS")); 255 256 if (tsp != NULL) 257 *tsp = ((struct namecache_ts *)ncp)->nc_time; 258 if (ticksp != NULL) 259 *ticksp = ((struct namecache_ts *)ncp)->nc_ticks; 260 } 261 262 static int doingcache = 1; /* 1 => enable the cache */ 263 SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, 264 "VFS namecache enabled"); 265 266 /* Export size information to userland */ 267 SYSCTL_INT(_debug_sizeof, OID_AUTO, namecache, CTLFLAG_RD, SYSCTL_NULL_INT_PTR, 268 sizeof(struct namecache), "sizeof(struct namecache)"); 269 270 /* 271 * The new name cache statistics 272 */ 273 static SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, 274 "Name cache statistics"); 275 #define STATNODE(mode, name, var, descr) \ 276 SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, descr); 277 STATNODE(CTLFLAG_RD, numneg, &numneg, "Number of negative cache entries"); 278 STATNODE(CTLFLAG_RD, numcache, &numcache, "Number of cache entries"); 279 static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls, 280 "Number of cache lookups"); 281 static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits, 282 "Number of '.' hits"); 283 static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits, 284 "Number of '..' hits"); 285 static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks, 286 "Number of checks in lookup"); 287 static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss, 288 "Number of cache misses"); 289 static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap, 290 "Number of cache misses we do not want to cache"); 291 static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps, 292 "Number of cache hits (positive) we do not want to cache"); 293 static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits, 294 "Number of cache hits (positive)"); 295 static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps, 296 "Number of cache hits (negative) we do not want to cache"); 297 static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits, 298 "Number of cache hits (negative)"); 299 static u_long numupgrades; STATNODE(CTLFLAG_RD, numupgrades, &numupgrades, 300 "Number of updates of the cache after lookup (write lock + retry)"); 301 302 SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD | CTLFLAG_MPSAFE, 303 &nchstats, sizeof(nchstats), "LU", 304 "VFS cache effectiveness statistics"); 305 306 static void cache_zap(struct namecache *ncp); 307 static int vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf, 308 u_int *buflen); 309 static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir, 310 char *buf, char **retbuf, u_int buflen); 311 312 static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries"); 313 314 #ifdef DIAGNOSTIC 315 /* 316 * Grab an atomic snapshot of the name cache hash chain lengths 317 */ 318 static SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, 319 "hash table stats"); 320 321 static int 322 sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS) 323 { 324 struct nchashhead *ncpp; 325 struct namecache *ncp; 326 int i, error, n_nchash, *cntbuf; 327 328 retry: 329 n_nchash = nchash + 1; /* nchash is max index, not count */ 330 if (req->oldptr == NULL) 331 return SYSCTL_OUT(req, 0, n_nchash * sizeof(int)); 332 cntbuf = malloc(n_nchash * sizeof(int), M_TEMP, M_ZERO | M_WAITOK); 333 CACHE_RLOCK(); 334 if (n_nchash != nchash + 1) { 335 CACHE_RUNLOCK(); 336 free(cntbuf, M_TEMP); 337 goto retry; 338 } 339 /* Scan hash tables counting entries */ 340 for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++) 341 LIST_FOREACH(ncp, ncpp, nc_hash) 342 cntbuf[i]++; 343 CACHE_RUNLOCK(); 344 for (error = 0, i = 0; i < n_nchash; i++) 345 if ((error = SYSCTL_OUT(req, &cntbuf[i], sizeof(int))) != 0) 346 break; 347 free(cntbuf, M_TEMP); 348 return (error); 349 } 350 SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD| 351 CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_rawnchash, "S,int", 352 "nchash chain lengths"); 353 354 static int 355 sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS) 356 { 357 int error; 358 struct nchashhead *ncpp; 359 struct namecache *ncp; 360 int n_nchash; 361 int count, maxlength, used, pct; 362 363 if (!req->oldptr) 364 return SYSCTL_OUT(req, 0, 4 * sizeof(int)); 365 366 CACHE_RLOCK(); 367 n_nchash = nchash + 1; /* nchash is max index, not count */ 368 used = 0; 369 maxlength = 0; 370 371 /* Scan hash tables for applicable entries */ 372 for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) { 373 count = 0; 374 LIST_FOREACH(ncp, ncpp, nc_hash) { 375 count++; 376 } 377 if (count) 378 used++; 379 if (maxlength < count) 380 maxlength = count; 381 } 382 n_nchash = nchash + 1; 383 CACHE_RUNLOCK(); 384 pct = (used * 100) / (n_nchash / 100); 385 error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash)); 386 if (error) 387 return (error); 388 error = SYSCTL_OUT(req, &used, sizeof(used)); 389 if (error) 390 return (error); 391 error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength)); 392 if (error) 393 return (error); 394 error = SYSCTL_OUT(req, &pct, sizeof(pct)); 395 if (error) 396 return (error); 397 return (0); 398 } 399 SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD| 400 CTLFLAG_MPSAFE, 0, 0, sysctl_debug_hashstat_nchash, "I", 401 "nchash statistics (number of total/used buckets, maximum chain length, usage percentage)"); 402 #endif 403 404 /* 405 * cache_zap(): 406 * 407 * Removes a namecache entry from cache, whether it contains an actual 408 * pointer to a vnode or if it is just a negative cache entry. 409 */ 410 static void 411 cache_zap(struct namecache *ncp) 412 { 413 struct vnode *vp; 414 415 rw_assert(&cache_lock, RA_WLOCKED); 416 CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp, ncp->nc_vp); 417 if (ncp->nc_vp != NULL) { 418 SDT_PROBE3(vfs, namecache, zap, done, ncp->nc_dvp, 419 nc_get_name(ncp), ncp->nc_vp); 420 } else { 421 SDT_PROBE2(vfs, namecache, zap_negative, done, ncp->nc_dvp, 422 nc_get_name(ncp)); 423 } 424 vp = NULL; 425 LIST_REMOVE(ncp, nc_hash); 426 if (ncp->nc_flag & NCF_ISDOTDOT) { 427 if (ncp == ncp->nc_dvp->v_cache_dd) 428 ncp->nc_dvp->v_cache_dd = NULL; 429 } else { 430 LIST_REMOVE(ncp, nc_src); 431 if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) { 432 vp = ncp->nc_dvp; 433 numcachehv--; 434 } 435 } 436 if (ncp->nc_vp) { 437 TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst); 438 if (ncp == ncp->nc_vp->v_cache_dd) 439 ncp->nc_vp->v_cache_dd = NULL; 440 } else { 441 TAILQ_REMOVE(&ncneg, ncp, nc_dst); 442 numneg--; 443 } 444 numcache--; 445 cache_free(ncp); 446 if (vp != NULL) 447 vdrop(vp); 448 } 449 450 /* 451 * Lookup an entry in the cache 452 * 453 * Lookup is called with dvp pointing to the directory to search, 454 * cnp pointing to the name of the entry being sought. If the lookup 455 * succeeds, the vnode is returned in *vpp, and a status of -1 is 456 * returned. If the lookup determines that the name does not exist 457 * (negative cacheing), a status of ENOENT is returned. If the lookup 458 * fails, a status of zero is returned. If the directory vnode is 459 * recycled out from under us due to a forced unmount, a status of 460 * ENOENT is returned. 461 * 462 * vpp is locked and ref'd on return. If we're looking up DOTDOT, dvp is 463 * unlocked. If we're looking up . an extra ref is taken, but the lock is 464 * not recursively acquired. 465 */ 466 467 int 468 cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, 469 struct timespec *tsp, int *ticksp) 470 { 471 struct namecache *ncp; 472 uint32_t hash; 473 int error, ltype, wlocked; 474 475 if (!doingcache) { 476 cnp->cn_flags &= ~MAKEENTRY; 477 return (0); 478 } 479 retry: 480 CACHE_RLOCK(); 481 wlocked = 0; 482 numcalls++; 483 error = 0; 484 485 retry_wlocked: 486 if (cnp->cn_nameptr[0] == '.') { 487 if (cnp->cn_namelen == 1) { 488 *vpp = dvp; 489 CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .", 490 dvp, cnp->cn_nameptr); 491 dothits++; 492 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", *vpp); 493 if (tsp != NULL) 494 timespecclear(tsp); 495 if (ticksp != NULL) 496 *ticksp = ticks; 497 goto success; 498 } 499 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') { 500 dotdothits++; 501 if (dvp->v_cache_dd == NULL) { 502 SDT_PROBE3(vfs, namecache, lookup, miss, dvp, 503 "..", NULL); 504 goto unlock; 505 } 506 if ((cnp->cn_flags & MAKEENTRY) == 0) { 507 if (!wlocked && !CACHE_UPGRADE_LOCK()) 508 goto wlock; 509 if (dvp->v_cache_dd->nc_flag & NCF_ISDOTDOT) 510 cache_zap(dvp->v_cache_dd); 511 dvp->v_cache_dd = NULL; 512 CACHE_WUNLOCK(); 513 return (0); 514 } 515 ncp = dvp->v_cache_dd; 516 if (ncp->nc_flag & NCF_ISDOTDOT) 517 *vpp = ncp->nc_vp; 518 else 519 *vpp = ncp->nc_dvp; 520 /* Return failure if negative entry was found. */ 521 if (*vpp == NULL) 522 goto negative_success; 523 CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..", 524 dvp, cnp->cn_nameptr, *vpp); 525 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, "..", 526 *vpp); 527 cache_out_ts(ncp, tsp, ticksp); 528 if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) == 529 NCF_DTS && tsp != NULL) 530 *tsp = ((struct namecache_ts *)ncp)-> 531 nc_dotdottime; 532 goto success; 533 } 534 } 535 536 hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT); 537 hash = fnv_32_buf(&dvp, sizeof(dvp), hash); 538 LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) { 539 numchecks++; 540 if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen && 541 !bcmp(nc_get_name(ncp), cnp->cn_nameptr, ncp->nc_nlen)) 542 break; 543 } 544 545 /* We failed to find an entry */ 546 if (ncp == NULL) { 547 SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr, 548 NULL); 549 if ((cnp->cn_flags & MAKEENTRY) == 0) { 550 nummisszap++; 551 } else { 552 nummiss++; 553 } 554 nchstats.ncs_miss++; 555 goto unlock; 556 } 557 558 /* We don't want to have an entry, so dump it */ 559 if ((cnp->cn_flags & MAKEENTRY) == 0) { 560 numposzaps++; 561 nchstats.ncs_badhits++; 562 if (!wlocked && !CACHE_UPGRADE_LOCK()) 563 goto wlock; 564 cache_zap(ncp); 565 CACHE_WUNLOCK(); 566 return (0); 567 } 568 569 /* We found a "positive" match, return the vnode */ 570 if (ncp->nc_vp) { 571 numposhits++; 572 nchstats.ncs_goodhits++; 573 *vpp = ncp->nc_vp; 574 CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p", 575 dvp, cnp->cn_nameptr, *vpp, ncp); 576 SDT_PROBE3(vfs, namecache, lookup, hit, dvp, nc_get_name(ncp), 577 *vpp); 578 cache_out_ts(ncp, tsp, ticksp); 579 goto success; 580 } 581 582 negative_success: 583 /* We found a negative match, and want to create it, so purge */ 584 if (cnp->cn_nameiop == CREATE) { 585 numnegzaps++; 586 nchstats.ncs_badhits++; 587 if (!wlocked && !CACHE_UPGRADE_LOCK()) 588 goto wlock; 589 cache_zap(ncp); 590 CACHE_WUNLOCK(); 591 return (0); 592 } 593 594 if (!wlocked && !CACHE_UPGRADE_LOCK()) 595 goto wlock; 596 numneghits++; 597 /* 598 * We found a "negative" match, so we shift it to the end of 599 * the "negative" cache entries queue to satisfy LRU. Also, 600 * check to see if the entry is a whiteout; indicate this to 601 * the componentname, if so. 602 */ 603 TAILQ_REMOVE(&ncneg, ncp, nc_dst); 604 TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst); 605 nchstats.ncs_neghits++; 606 if (ncp->nc_flag & NCF_WHITE) 607 cnp->cn_flags |= ISWHITEOUT; 608 SDT_PROBE2(vfs, namecache, lookup, hit__negative, dvp, 609 nc_get_name(ncp)); 610 cache_out_ts(ncp, tsp, ticksp); 611 CACHE_WUNLOCK(); 612 return (ENOENT); 613 614 wlock: 615 /* 616 * We need to update the cache after our lookup, so upgrade to 617 * a write lock and retry the operation. 618 */ 619 CACHE_RUNLOCK(); 620 CACHE_WLOCK(); 621 numupgrades++; 622 wlocked = 1; 623 goto retry_wlocked; 624 625 success: 626 /* 627 * On success we return a locked and ref'd vnode as per the lookup 628 * protocol. 629 */ 630 if (dvp == *vpp) { /* lookup on "." */ 631 VREF(*vpp); 632 if (wlocked) 633 CACHE_WUNLOCK(); 634 else 635 CACHE_RUNLOCK(); 636 /* 637 * When we lookup "." we still can be asked to lock it 638 * differently... 639 */ 640 ltype = cnp->cn_lkflags & LK_TYPE_MASK; 641 if (ltype != VOP_ISLOCKED(*vpp)) { 642 if (ltype == LK_EXCLUSIVE) { 643 vn_lock(*vpp, LK_UPGRADE | LK_RETRY); 644 if ((*vpp)->v_iflag & VI_DOOMED) { 645 /* forced unmount */ 646 vrele(*vpp); 647 *vpp = NULL; 648 return (ENOENT); 649 } 650 } else 651 vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY); 652 } 653 return (-1); 654 } 655 ltype = 0; /* silence gcc warning */ 656 if (cnp->cn_flags & ISDOTDOT) { 657 ltype = VOP_ISLOCKED(dvp); 658 VOP_UNLOCK(dvp, 0); 659 } 660 vhold(*vpp); 661 if (wlocked) 662 CACHE_WUNLOCK(); 663 else 664 CACHE_RUNLOCK(); 665 error = vget(*vpp, cnp->cn_lkflags | LK_VNHELD, cnp->cn_thread); 666 if (cnp->cn_flags & ISDOTDOT) { 667 vn_lock(dvp, ltype | LK_RETRY); 668 if (dvp->v_iflag & VI_DOOMED) { 669 if (error == 0) 670 vput(*vpp); 671 *vpp = NULL; 672 return (ENOENT); 673 } 674 } 675 if (error) { 676 *vpp = NULL; 677 goto retry; 678 } 679 if ((cnp->cn_flags & ISLASTCN) && 680 (cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) { 681 ASSERT_VOP_ELOCKED(*vpp, "cache_lookup"); 682 } 683 return (-1); 684 685 unlock: 686 if (wlocked) 687 CACHE_WUNLOCK(); 688 else 689 CACHE_RUNLOCK(); 690 return (0); 691 } 692 693 /* 694 * Add an entry to the cache. 695 */ 696 void 697 cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, 698 struct timespec *tsp, struct timespec *dtsp) 699 { 700 struct namecache *ncp, *n2; 701 struct namecache_ts *n3; 702 struct nchashhead *ncpp; 703 uint32_t hash; 704 int flag; 705 int hold; 706 int zap; 707 int len; 708 709 CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr); 710 VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp, 711 ("cache_enter: Adding a doomed vnode")); 712 VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp, 713 ("cache_enter: Doomed vnode used as src")); 714 715 if (!doingcache) 716 return; 717 718 /* 719 * Avoid blowout in namecache entries. 720 */ 721 if (numcache >= desiredvnodes * ncsizefactor) 722 return; 723 724 flag = 0; 725 if (cnp->cn_nameptr[0] == '.') { 726 if (cnp->cn_namelen == 1) 727 return; 728 if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') { 729 CACHE_WLOCK(); 730 /* 731 * If dotdot entry already exists, just retarget it 732 * to new parent vnode, otherwise continue with new 733 * namecache entry allocation. 734 */ 735 if ((ncp = dvp->v_cache_dd) != NULL && 736 ncp->nc_flag & NCF_ISDOTDOT) { 737 KASSERT(ncp->nc_dvp == dvp, 738 ("wrong isdotdot parent")); 739 if (ncp->nc_vp != NULL) { 740 TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, 741 ncp, nc_dst); 742 } else { 743 TAILQ_REMOVE(&ncneg, ncp, nc_dst); 744 numneg--; 745 } 746 if (vp != NULL) { 747 TAILQ_INSERT_HEAD(&vp->v_cache_dst, 748 ncp, nc_dst); 749 } else { 750 TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst); 751 numneg++; 752 } 753 ncp->nc_vp = vp; 754 CACHE_WUNLOCK(); 755 return; 756 } 757 dvp->v_cache_dd = NULL; 758 SDT_PROBE3(vfs, namecache, enter, done, dvp, "..", vp); 759 CACHE_WUNLOCK(); 760 flag = NCF_ISDOTDOT; 761 } 762 } 763 764 hold = 0; 765 zap = 0; 766 767 /* 768 * Calculate the hash key and setup as much of the new 769 * namecache entry as possible before acquiring the lock. 770 */ 771 ncp = cache_alloc(cnp->cn_namelen, tsp != NULL); 772 ncp->nc_vp = vp; 773 ncp->nc_dvp = dvp; 774 ncp->nc_flag = flag; 775 if (tsp != NULL) { 776 n3 = (struct namecache_ts *)ncp; 777 n3->nc_time = *tsp; 778 n3->nc_ticks = ticks; 779 n3->nc_flag |= NCF_TS; 780 if (dtsp != NULL) { 781 n3->nc_dotdottime = *dtsp; 782 n3->nc_flag |= NCF_DTS; 783 } 784 } 785 len = ncp->nc_nlen = cnp->cn_namelen; 786 hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT); 787 strlcpy(nc_get_name(ncp), cnp->cn_nameptr, len + 1); 788 hash = fnv_32_buf(&dvp, sizeof(dvp), hash); 789 CACHE_WLOCK(); 790 791 /* 792 * See if this vnode or negative entry is already in the cache 793 * with this name. This can happen with concurrent lookups of 794 * the same path name. 795 */ 796 ncpp = NCHHASH(hash); 797 LIST_FOREACH(n2, ncpp, nc_hash) { 798 if (n2->nc_dvp == dvp && 799 n2->nc_nlen == cnp->cn_namelen && 800 !bcmp(nc_get_name(n2), cnp->cn_nameptr, n2->nc_nlen)) { 801 if (tsp != NULL) { 802 KASSERT((n2->nc_flag & NCF_TS) != 0, 803 ("no NCF_TS")); 804 n3 = (struct namecache_ts *)n2; 805 n3->nc_time = 806 ((struct namecache_ts *)ncp)->nc_time; 807 n3->nc_ticks = 808 ((struct namecache_ts *)ncp)->nc_ticks; 809 if (dtsp != NULL) { 810 n3->nc_dotdottime = 811 ((struct namecache_ts *)ncp)-> 812 nc_dotdottime; 813 n3->nc_flag |= NCF_DTS; 814 } 815 } 816 CACHE_WUNLOCK(); 817 cache_free(ncp); 818 return; 819 } 820 } 821 822 if (flag == NCF_ISDOTDOT) { 823 /* 824 * See if we are trying to add .. entry, but some other lookup 825 * has populated v_cache_dd pointer already. 826 */ 827 if (dvp->v_cache_dd != NULL) { 828 CACHE_WUNLOCK(); 829 cache_free(ncp); 830 return; 831 } 832 KASSERT(vp == NULL || vp->v_type == VDIR, 833 ("wrong vnode type %p", vp)); 834 dvp->v_cache_dd = ncp; 835 } 836 837 numcache++; 838 if (vp == NULL) { 839 numneg++; 840 if (cnp->cn_flags & ISWHITEOUT) 841 ncp->nc_flag |= NCF_WHITE; 842 } else if (vp->v_type == VDIR) { 843 if (flag != NCF_ISDOTDOT) { 844 /* 845 * For this case, the cache entry maps both the 846 * directory name in it and the name ".." for the 847 * directory's parent. 848 */ 849 if ((n2 = vp->v_cache_dd) != NULL && 850 (n2->nc_flag & NCF_ISDOTDOT) != 0) 851 cache_zap(n2); 852 vp->v_cache_dd = ncp; 853 } 854 } else { 855 vp->v_cache_dd = NULL; 856 } 857 858 /* 859 * Insert the new namecache entry into the appropriate chain 860 * within the cache entries table. 861 */ 862 LIST_INSERT_HEAD(ncpp, ncp, nc_hash); 863 if (flag != NCF_ISDOTDOT) { 864 if (LIST_EMPTY(&dvp->v_cache_src)) { 865 hold = 1; 866 numcachehv++; 867 } 868 LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src); 869 } 870 871 /* 872 * If the entry is "negative", we place it into the 873 * "negative" cache queue, otherwise, we place it into the 874 * destination vnode's cache entries queue. 875 */ 876 if (vp != NULL) { 877 TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst); 878 SDT_PROBE3(vfs, namecache, enter, done, dvp, nc_get_name(ncp), 879 vp); 880 } else { 881 TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst); 882 SDT_PROBE2(vfs, namecache, enter_negative, done, dvp, 883 nc_get_name(ncp)); 884 } 885 if (numneg * ncnegfactor > numcache) { 886 ncp = TAILQ_FIRST(&ncneg); 887 KASSERT(ncp->nc_vp == NULL, ("ncp %p vp %p on ncneg", 888 ncp, ncp->nc_vp)); 889 zap = 1; 890 } 891 if (hold) 892 vhold(dvp); 893 if (zap) 894 cache_zap(ncp); 895 CACHE_WUNLOCK(); 896 } 897 898 /* 899 * Name cache initialization, from vfs_init() when we are booting 900 */ 901 static void 902 nchinit(void *dummy __unused) 903 { 904 905 TAILQ_INIT(&ncneg); 906 907 cache_zone_small = uma_zcreate("S VFS Cache", 908 sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1, 909 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 910 cache_zone_small_ts = uma_zcreate("STS VFS Cache", 911 sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1, 912 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 913 cache_zone_large = uma_zcreate("L VFS Cache", 914 sizeof(struct namecache) + NAME_MAX + 1, 915 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 916 cache_zone_large_ts = uma_zcreate("LTS VFS Cache", 917 sizeof(struct namecache_ts) + NAME_MAX + 1, 918 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 919 920 nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash); 921 } 922 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL); 923 924 void 925 cache_changesize(int newmaxvnodes) 926 { 927 struct nchashhead *new_nchashtbl, *old_nchashtbl; 928 u_long new_nchash, old_nchash; 929 struct namecache *ncp; 930 uint32_t hash; 931 int i; 932 933 new_nchashtbl = hashinit(newmaxvnodes * 2, M_VFSCACHE, &new_nchash); 934 /* If same hash table size, nothing to do */ 935 if (nchash == new_nchash) { 936 free(new_nchashtbl, M_VFSCACHE); 937 return; 938 } 939 /* 940 * Move everything from the old hash table to the new table. 941 * None of the namecache entries in the table can be removed 942 * because to do so, they have to be removed from the hash table. 943 */ 944 CACHE_WLOCK(); 945 old_nchashtbl = nchashtbl; 946 old_nchash = nchash; 947 nchashtbl = new_nchashtbl; 948 nchash = new_nchash; 949 for (i = 0; i <= old_nchash; i++) { 950 while ((ncp = LIST_FIRST(&old_nchashtbl[i])) != NULL) { 951 hash = fnv_32_buf(nc_get_name(ncp), ncp->nc_nlen, 952 FNV1_32_INIT); 953 hash = fnv_32_buf(&ncp->nc_dvp, sizeof(ncp->nc_dvp), 954 hash); 955 LIST_REMOVE(ncp, nc_hash); 956 LIST_INSERT_HEAD(NCHHASH(hash), ncp, nc_hash); 957 } 958 } 959 CACHE_WUNLOCK(); 960 free(old_nchashtbl, M_VFSCACHE); 961 } 962 963 /* 964 * Invalidate all entries to a particular vnode. 965 */ 966 void 967 cache_purge(struct vnode *vp) 968 { 969 970 CTR1(KTR_VFS, "cache_purge(%p)", vp); 971 SDT_PROBE1(vfs, namecache, purge, done, vp); 972 CACHE_WLOCK(); 973 while (!LIST_EMPTY(&vp->v_cache_src)) 974 cache_zap(LIST_FIRST(&vp->v_cache_src)); 975 while (!TAILQ_EMPTY(&vp->v_cache_dst)) 976 cache_zap(TAILQ_FIRST(&vp->v_cache_dst)); 977 if (vp->v_cache_dd != NULL) { 978 KASSERT(vp->v_cache_dd->nc_flag & NCF_ISDOTDOT, 979 ("lost dotdot link")); 980 cache_zap(vp->v_cache_dd); 981 } 982 KASSERT(vp->v_cache_dd == NULL, ("incomplete purge")); 983 CACHE_WUNLOCK(); 984 } 985 986 /* 987 * Invalidate all negative entries for a particular directory vnode. 988 */ 989 void 990 cache_purge_negative(struct vnode *vp) 991 { 992 struct namecache *cp, *ncp; 993 994 CTR1(KTR_VFS, "cache_purge_negative(%p)", vp); 995 SDT_PROBE1(vfs, namecache, purge_negative, done, vp); 996 CACHE_WLOCK(); 997 LIST_FOREACH_SAFE(cp, &vp->v_cache_src, nc_src, ncp) { 998 if (cp->nc_vp == NULL) 999 cache_zap(cp); 1000 } 1001 CACHE_WUNLOCK(); 1002 } 1003 1004 /* 1005 * Flush all entries referencing a particular filesystem. 1006 */ 1007 void 1008 cache_purgevfs(struct mount *mp) 1009 { 1010 struct nchashhead *ncpp; 1011 struct namecache *ncp, *nnp; 1012 1013 /* Scan hash tables for applicable entries */ 1014 SDT_PROBE1(vfs, namecache, purgevfs, done, mp); 1015 CACHE_WLOCK(); 1016 for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) { 1017 LIST_FOREACH_SAFE(ncp, ncpp, nc_hash, nnp) { 1018 if (ncp->nc_dvp->v_mount == mp) 1019 cache_zap(ncp); 1020 } 1021 } 1022 CACHE_WUNLOCK(); 1023 } 1024 1025 /* 1026 * Perform canonical checks and cache lookup and pass on to filesystem 1027 * through the vop_cachedlookup only if needed. 1028 */ 1029 1030 int 1031 vfs_cache_lookup(struct vop_lookup_args *ap) 1032 { 1033 struct vnode *dvp; 1034 int error; 1035 struct vnode **vpp = ap->a_vpp; 1036 struct componentname *cnp = ap->a_cnp; 1037 struct ucred *cred = cnp->cn_cred; 1038 int flags = cnp->cn_flags; 1039 struct thread *td = cnp->cn_thread; 1040 1041 *vpp = NULL; 1042 dvp = ap->a_dvp; 1043 1044 if (dvp->v_type != VDIR) 1045 return (ENOTDIR); 1046 1047 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && 1048 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) 1049 return (EROFS); 1050 1051 error = VOP_ACCESS(dvp, VEXEC, cred, td); 1052 if (error) 1053 return (error); 1054 1055 error = cache_lookup(dvp, vpp, cnp, NULL, NULL); 1056 if (error == 0) 1057 return (VOP_CACHEDLOOKUP(dvp, vpp, cnp)); 1058 if (error == -1) 1059 return (0); 1060 return (error); 1061 } 1062 1063 /* 1064 * XXX All of these sysctls would probably be more productive dead. 1065 */ 1066 static int disablecwd; 1067 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, 1068 "Disable the getcwd syscall"); 1069 1070 /* Implementation of the getcwd syscall. */ 1071 int 1072 sys___getcwd(struct thread *td, struct __getcwd_args *uap) 1073 { 1074 1075 return (kern___getcwd(td, uap->buf, UIO_USERSPACE, uap->buflen, 1076 MAXPATHLEN)); 1077 } 1078 1079 int 1080 kern___getcwd(struct thread *td, char *buf, enum uio_seg bufseg, u_int buflen, 1081 u_int path_max) 1082 { 1083 char *bp, *tmpbuf; 1084 struct filedesc *fdp; 1085 struct vnode *cdir, *rdir; 1086 int error; 1087 1088 if (disablecwd) 1089 return (ENODEV); 1090 if (buflen < 2) 1091 return (EINVAL); 1092 if (buflen > path_max) 1093 buflen = path_max; 1094 1095 tmpbuf = malloc(buflen, M_TEMP, M_WAITOK); 1096 fdp = td->td_proc->p_fd; 1097 FILEDESC_SLOCK(fdp); 1098 cdir = fdp->fd_cdir; 1099 VREF(cdir); 1100 rdir = fdp->fd_rdir; 1101 VREF(rdir); 1102 FILEDESC_SUNLOCK(fdp); 1103 error = vn_fullpath1(td, cdir, rdir, tmpbuf, &bp, buflen); 1104 vrele(rdir); 1105 vrele(cdir); 1106 1107 if (!error) { 1108 if (bufseg == UIO_SYSSPACE) 1109 bcopy(bp, buf, strlen(bp) + 1); 1110 else 1111 error = copyout(bp, buf, strlen(bp) + 1); 1112 #ifdef KTRACE 1113 if (KTRPOINT(curthread, KTR_NAMEI)) 1114 ktrnamei(bp); 1115 #endif 1116 } 1117 free(tmpbuf, M_TEMP); 1118 return (error); 1119 } 1120 1121 /* 1122 * Thus begins the fullpath magic. 1123 */ 1124 1125 #undef STATNODE 1126 #define STATNODE(name, descr) \ 1127 static u_int name; \ 1128 SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, descr) 1129 1130 static int disablefullpath; 1131 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0, 1132 "Disable the vn_fullpath function"); 1133 1134 /* These count for kern___getcwd(), too. */ 1135 STATNODE(numfullpathcalls, "Number of fullpath search calls"); 1136 STATNODE(numfullpathfail1, "Number of fullpath search errors (ENOTDIR)"); 1137 STATNODE(numfullpathfail2, 1138 "Number of fullpath search errors (VOP_VPTOCNP failures)"); 1139 STATNODE(numfullpathfail4, "Number of fullpath search errors (ENOMEM)"); 1140 STATNODE(numfullpathfound, "Number of successful fullpath calls"); 1141 1142 /* 1143 * Retrieve the full filesystem path that correspond to a vnode from the name 1144 * cache (if available) 1145 */ 1146 int 1147 vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf) 1148 { 1149 char *buf; 1150 struct filedesc *fdp; 1151 struct vnode *rdir; 1152 int error; 1153 1154 if (disablefullpath) 1155 return (ENODEV); 1156 if (vn == NULL) 1157 return (EINVAL); 1158 1159 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); 1160 fdp = td->td_proc->p_fd; 1161 FILEDESC_SLOCK(fdp); 1162 rdir = fdp->fd_rdir; 1163 VREF(rdir); 1164 FILEDESC_SUNLOCK(fdp); 1165 error = vn_fullpath1(td, vn, rdir, buf, retbuf, MAXPATHLEN); 1166 vrele(rdir); 1167 1168 if (!error) 1169 *freebuf = buf; 1170 else 1171 free(buf, M_TEMP); 1172 return (error); 1173 } 1174 1175 /* 1176 * This function is similar to vn_fullpath, but it attempts to lookup the 1177 * pathname relative to the global root mount point. This is required for the 1178 * auditing sub-system, as audited pathnames must be absolute, relative to the 1179 * global root mount point. 1180 */ 1181 int 1182 vn_fullpath_global(struct thread *td, struct vnode *vn, 1183 char **retbuf, char **freebuf) 1184 { 1185 char *buf; 1186 int error; 1187 1188 if (disablefullpath) 1189 return (ENODEV); 1190 if (vn == NULL) 1191 return (EINVAL); 1192 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); 1193 error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN); 1194 if (!error) 1195 *freebuf = buf; 1196 else 1197 free(buf, M_TEMP); 1198 return (error); 1199 } 1200 1201 int 1202 vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen) 1203 { 1204 int error; 1205 1206 CACHE_RLOCK(); 1207 error = vn_vptocnp_locked(vp, cred, buf, buflen); 1208 if (error == 0) 1209 CACHE_RUNLOCK(); 1210 return (error); 1211 } 1212 1213 static int 1214 vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf, 1215 u_int *buflen) 1216 { 1217 struct vnode *dvp; 1218 struct namecache *ncp; 1219 int error; 1220 1221 TAILQ_FOREACH(ncp, &((*vp)->v_cache_dst), nc_dst) { 1222 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 1223 break; 1224 } 1225 if (ncp != NULL) { 1226 if (*buflen < ncp->nc_nlen) { 1227 CACHE_RUNLOCK(); 1228 vrele(*vp); 1229 numfullpathfail4++; 1230 error = ENOMEM; 1231 SDT_PROBE3(vfs, namecache, fullpath, return, error, 1232 vp, NULL); 1233 return (error); 1234 } 1235 *buflen -= ncp->nc_nlen; 1236 memcpy(buf + *buflen, nc_get_name(ncp), ncp->nc_nlen); 1237 SDT_PROBE3(vfs, namecache, fullpath, hit, ncp->nc_dvp, 1238 nc_get_name(ncp), vp); 1239 dvp = *vp; 1240 *vp = ncp->nc_dvp; 1241 vref(*vp); 1242 CACHE_RUNLOCK(); 1243 vrele(dvp); 1244 CACHE_RLOCK(); 1245 return (0); 1246 } 1247 SDT_PROBE1(vfs, namecache, fullpath, miss, vp); 1248 1249 CACHE_RUNLOCK(); 1250 vn_lock(*vp, LK_SHARED | LK_RETRY); 1251 error = VOP_VPTOCNP(*vp, &dvp, cred, buf, buflen); 1252 vput(*vp); 1253 if (error) { 1254 numfullpathfail2++; 1255 SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL); 1256 return (error); 1257 } 1258 1259 *vp = dvp; 1260 CACHE_RLOCK(); 1261 if (dvp->v_iflag & VI_DOOMED) { 1262 /* forced unmount */ 1263 CACHE_RUNLOCK(); 1264 vrele(dvp); 1265 error = ENOENT; 1266 SDT_PROBE3(vfs, namecache, fullpath, return, error, vp, NULL); 1267 return (error); 1268 } 1269 /* 1270 * *vp has its use count incremented still. 1271 */ 1272 1273 return (0); 1274 } 1275 1276 /* 1277 * The magic behind kern___getcwd() and vn_fullpath(). 1278 */ 1279 static int 1280 vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir, 1281 char *buf, char **retbuf, u_int buflen) 1282 { 1283 int error, slash_prefixed; 1284 #ifdef KDTRACE_HOOKS 1285 struct vnode *startvp = vp; 1286 #endif 1287 struct vnode *vp1; 1288 1289 buflen--; 1290 buf[buflen] = '\0'; 1291 error = 0; 1292 slash_prefixed = 0; 1293 1294 SDT_PROBE1(vfs, namecache, fullpath, entry, vp); 1295 numfullpathcalls++; 1296 vref(vp); 1297 CACHE_RLOCK(); 1298 if (vp->v_type != VDIR) { 1299 error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen); 1300 if (error) 1301 return (error); 1302 if (buflen == 0) { 1303 CACHE_RUNLOCK(); 1304 vrele(vp); 1305 return (ENOMEM); 1306 } 1307 buf[--buflen] = '/'; 1308 slash_prefixed = 1; 1309 } 1310 while (vp != rdir && vp != rootvnode) { 1311 if (vp->v_vflag & VV_ROOT) { 1312 if (vp->v_iflag & VI_DOOMED) { /* forced unmount */ 1313 CACHE_RUNLOCK(); 1314 vrele(vp); 1315 error = ENOENT; 1316 SDT_PROBE3(vfs, namecache, fullpath, return, 1317 error, vp, NULL); 1318 break; 1319 } 1320 vp1 = vp->v_mount->mnt_vnodecovered; 1321 vref(vp1); 1322 CACHE_RUNLOCK(); 1323 vrele(vp); 1324 vp = vp1; 1325 CACHE_RLOCK(); 1326 continue; 1327 } 1328 if (vp->v_type != VDIR) { 1329 CACHE_RUNLOCK(); 1330 vrele(vp); 1331 numfullpathfail1++; 1332 error = ENOTDIR; 1333 SDT_PROBE3(vfs, namecache, fullpath, return, 1334 error, vp, NULL); 1335 break; 1336 } 1337 error = vn_vptocnp_locked(&vp, td->td_ucred, buf, &buflen); 1338 if (error) 1339 break; 1340 if (buflen == 0) { 1341 CACHE_RUNLOCK(); 1342 vrele(vp); 1343 error = ENOMEM; 1344 SDT_PROBE3(vfs, namecache, fullpath, return, error, 1345 startvp, NULL); 1346 break; 1347 } 1348 buf[--buflen] = '/'; 1349 slash_prefixed = 1; 1350 } 1351 if (error) 1352 return (error); 1353 if (!slash_prefixed) { 1354 if (buflen == 0) { 1355 CACHE_RUNLOCK(); 1356 vrele(vp); 1357 numfullpathfail4++; 1358 SDT_PROBE3(vfs, namecache, fullpath, return, ENOMEM, 1359 startvp, NULL); 1360 return (ENOMEM); 1361 } 1362 buf[--buflen] = '/'; 1363 } 1364 numfullpathfound++; 1365 CACHE_RUNLOCK(); 1366 vrele(vp); 1367 1368 SDT_PROBE3(vfs, namecache, fullpath, return, 0, startvp, buf + buflen); 1369 *retbuf = buf + buflen; 1370 return (0); 1371 } 1372 1373 struct vnode * 1374 vn_dir_dd_ino(struct vnode *vp) 1375 { 1376 struct namecache *ncp; 1377 struct vnode *ddvp; 1378 1379 ASSERT_VOP_LOCKED(vp, "vn_dir_dd_ino"); 1380 CACHE_RLOCK(); 1381 TAILQ_FOREACH(ncp, &(vp->v_cache_dst), nc_dst) { 1382 if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) 1383 continue; 1384 ddvp = ncp->nc_dvp; 1385 vhold(ddvp); 1386 CACHE_RUNLOCK(); 1387 if (vget(ddvp, LK_SHARED | LK_NOWAIT | LK_VNHELD, curthread)) 1388 return (NULL); 1389 return (ddvp); 1390 } 1391 CACHE_RUNLOCK(); 1392 return (NULL); 1393 } 1394 1395 int 1396 vn_commname(struct vnode *vp, char *buf, u_int buflen) 1397 { 1398 struct namecache *ncp; 1399 int l; 1400 1401 CACHE_RLOCK(); 1402 TAILQ_FOREACH(ncp, &vp->v_cache_dst, nc_dst) 1403 if ((ncp->nc_flag & NCF_ISDOTDOT) == 0) 1404 break; 1405 if (ncp == NULL) { 1406 CACHE_RUNLOCK(); 1407 return (ENOENT); 1408 } 1409 l = min(ncp->nc_nlen, buflen - 1); 1410 memcpy(buf, nc_get_name(ncp), l); 1411 CACHE_RUNLOCK(); 1412 buf[l] = '\0'; 1413 return (0); 1414 } 1415 1416 /* ABI compat shims for old kernel modules. */ 1417 #undef cache_enter 1418 1419 void cache_enter(struct vnode *dvp, struct vnode *vp, 1420 struct componentname *cnp); 1421 1422 void 1423 cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp) 1424 { 1425 1426 cache_enter_time(dvp, vp, cnp, NULL, NULL); 1427 } 1428 1429 /* 1430 * This function updates path string to vnode's full global path 1431 * and checks the size of the new path string against the pathlen argument. 1432 * 1433 * Requires a locked, referenced vnode. 1434 * Vnode is re-locked on success or ENODEV, otherwise unlocked. 1435 * 1436 * If sysctl debug.disablefullpath is set, ENODEV is returned, 1437 * vnode is left locked and path remain untouched. 1438 * 1439 * If vp is a directory, the call to vn_fullpath_global() always succeeds 1440 * because it falls back to the ".." lookup if the namecache lookup fails. 1441 */ 1442 int 1443 vn_path_to_global_path(struct thread *td, struct vnode *vp, char *path, 1444 u_int pathlen) 1445 { 1446 struct nameidata nd; 1447 struct vnode *vp1; 1448 char *rpath, *fbuf; 1449 int error; 1450 1451 ASSERT_VOP_ELOCKED(vp, __func__); 1452 1453 /* Return ENODEV if sysctl debug.disablefullpath==1 */ 1454 if (disablefullpath) 1455 return (ENODEV); 1456 1457 /* Construct global filesystem path from vp. */ 1458 VOP_UNLOCK(vp, 0); 1459 error = vn_fullpath_global(td, vp, &rpath, &fbuf); 1460 1461 if (error != 0) { 1462 vrele(vp); 1463 return (error); 1464 } 1465 1466 if (strlen(rpath) >= pathlen) { 1467 vrele(vp); 1468 error = ENAMETOOLONG; 1469 goto out; 1470 } 1471 1472 /* 1473 * Re-lookup the vnode by path to detect a possible rename. 1474 * As a side effect, the vnode is relocked. 1475 * If vnode was renamed, return ENOENT. 1476 */ 1477 NDINIT(&nd, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNODE1, 1478 UIO_SYSSPACE, path, td); 1479 error = namei(&nd); 1480 if (error != 0) { 1481 vrele(vp); 1482 goto out; 1483 } 1484 NDFREE(&nd, NDF_ONLY_PNBUF); 1485 vp1 = nd.ni_vp; 1486 vrele(vp); 1487 if (vp1 == vp) 1488 strcpy(path, rpath); 1489 else { 1490 vput(vp1); 1491 error = ENOENT; 1492 } 1493 1494 out: 1495 free(fbuf, M_TEMP); 1496 return (error); 1497 } 1498