1 /* 2 * Copyright (c) 1989, 1993, 1995 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Poul-Henning Kamp of the FreeBSD Project. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the University of 19 * California, Berkeley and its contributors. 20 * 4. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/filedesc.h>

/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 *
 * An entry lives on three lists at once: the hash chain (for lookup by
 * (directory vnode, name)), the parent directory's v_cache_src list, and
 * either the target vnode's v_cache_dst list (positive entry) or the
 * global negative-entry LRU (negative entry, nc_vp == NULL).
 */

struct	namecache {
	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
	struct	vnode *nc_dvp;		/* vnode of parent of name */
	struct	vnode *nc_vp;		/* vnode the name refers to; NULL => negative entry */
	u_char	nc_flag;		/* flag bits (NCF_*) */
	u_char	nc_nlen;		/* length of name */
	char	nc_name[0];		/* segment name; variable-length, allocated
					 * past the struct (see cache_enter()) */
};

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  Cache is indexed by hash value
 * obtained from (vp, name) where vp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry, (i.e. for a name that is known NOT to
 * exist) the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 */

/*
 * Structures associated with name cacheing.
 */
/* Hash bucket for (directory vnode, additive name hash). */
#define NCHHASH(dvp, hash) \
	(&nchashtbl[((dvp)->v_id + (hash)) & nchash])
static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static TAILQ_HEAD(, namecache) ncneg;	/* negative-entry LRU list (oldest at head) */
static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
static u_long	numneg;			/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
struct	nchstats nchstats;		/* cache effectiveness statistics */

static int	doingcache = 1;		/* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, "");
SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long	numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long	dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long	dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long	numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long	nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long	nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long	numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long	numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long	numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long	numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD, &nchstats,
	sizeof(nchstats), "LU", "VFS cache effectiveness statistics");


static void cache_zap __P((struct namecache *ncp));

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

/*
 * Flags in namecache.nc_flag
 */
#define NCF_WHITE	1

/*
 * Delete an entry from its hash list and move it to the front
 * of the LRU list for immediate reuse.
 *
 * Unlinks ncp from all three lists it is on, drops the hold on the
 * parent directory vnode if this was its last cache entry, maintains
 * the numneg/numcache counters, and frees the entry.
 */
static void
cache_zap(ncp)
	struct namecache *ncp;
{
	LIST_REMOVE(ncp, nc_hash);
	LIST_REMOVE(ncp, nc_src);
	/* Last cache entry sourced from this directory: release its hold. */
	if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src))
		vdrop(ncp->nc_dvp);
	if (ncp->nc_vp) {
		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
	} else {
		/* Negative entry: it lives on the global LRU instead. */
		TAILQ_REMOVE(&ncneg, ncp, nc_dst);
		numneg--;
	}
	numcache--;
	free(ncp, M_VFSCACHE);
}

/*
 * Lookup an entry in the cache
 *
 * We don't do this if the segment name is long, simply so the cache
 * can avoid holding long names (which would either waste space, or
 * add greatly to the complexity).
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp pointing to the name of the entry being sought.  If the lookup
 * succeeds, the vnode is returned in *vpp, and a status of -1 is
 * returned.  If the lookup determines that the name does not exist
 * (negative cacheing), a status of ENOENT is returned.  If the lookup
 * fails, a status of zero is returned.
 */

int
cache_lookup(dvp, vpp, cnp)
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	struct namecache *ncp;
	u_long hash;
	u_char *cp;
	int len;

	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (0);
	}

	numcalls++;

	/* "." and ".." are resolved directly, without a hash lookup. */
	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			*vpp = dvp;
			dothits++;
			return (-1);
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			dotdothits++;
			/*
			 * The cached parent (v_dd) is only trusted while its
			 * recorded capability number (v_ddid) still matches.
			 */
			if (dvp->v_dd->v_id != dvp->v_ddid ||
			    (cnp->cn_flags & MAKEENTRY) == 0) {
				dvp->v_ddid = 0;
				return (0);
			}
			*vpp = dvp->v_dd;
			return (-1);
		}
	}

	/* Simple additive hash over the name bytes; see NCHHASH(). */
	hash = 0;
	len = cnp->cn_namelen;
	for (cp = cnp->cn_nameptr; len; len--, cp++)
		hash += *cp;
	LIST_FOREACH(ncp, (NCHHASH(dvp, hash)), nc_hash) {
		numchecks++;
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (ncp == 0) {
		if ((cnp->cn_flags & MAKEENTRY) == 0) {
			nummisszap++;
		} else {
			nummiss++;
		}
		nchstats.ncs_miss++;
		return (0);
	}

	/* We don't want to have an entry, so dump it */
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		numposzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		return (0);
	}

	/* We found a "positive" match, return the vnode */
	if (ncp->nc_vp) {
		numposhits++;
		nchstats.ncs_goodhits++;
		*vpp = ncp->nc_vp;
		return (-1);
	}

	/* We found a negative match, and want to create it, so purge */
	if (cnp->cn_nameiop == CREATE) {
		numnegzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		return (0);
	}

	numneghits++;
	/*
	 * We found a "negative" match, ENOENT notifies client of this match.
	 * The nc_vpid field records whether this is a whiteout.
	 * Move the hit to the tail of the negative LRU so it is the last
	 * candidate for recycling by cache_enter().
	 */
	TAILQ_REMOVE(&ncneg, ncp, nc_dst);
	TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	nchstats.ncs_neghits++;
	if (ncp->nc_flag & NCF_WHITE)
		cnp->cn_flags |= ISWHITEOUT;
	return (ENOENT);
}

/*
 * Add an entry to the cache.
 *
 * dvp is the directory, vp the vnode the name resolves to (NULL for a
 * negative entry), cnp carries the name.  "." is never cached; ".." is
 * recorded directly in dvp's v_dd/v_ddid back-pointer instead of as a
 * cache entry.
 */
void
cache_enter(dvp, vp, cnp)
	struct vnode *dvp;
	struct vnode *vp;
	struct componentname *cnp;
{
	struct namecache *ncp;
	struct nchashhead *ncpp;
	u_long hash;
	u_char *cp, *dp;
	int len;

	if (!doingcache)
		return;

	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			return;
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			if (vp) {
				dvp->v_dd = vp;
				dvp->v_ddid = vp->v_id;
			} else {
				dvp->v_dd = dvp;
				dvp->v_ddid = 0;
			}
			return;
		}
	}

	/* Name bytes are stored inline after the struct (nc_name[0]). */
	ncp = (struct namecache *)
		malloc(sizeof *ncp + cnp->cn_namelen, M_VFSCACHE, M_WAITOK);
	bzero((char *)ncp, sizeof *ncp);
	numcache++;
	if (!vp) {
		numneg++;
		ncp->nc_flag = cnp->cn_flags & ISWHITEOUT ? NCF_WHITE : 0;
	} else if (vp->v_type == VDIR) {
		/* Keep the child's ".." back-pointer current. */
		vp->v_dd = dvp;
		vp->v_ddid = dvp->v_id;
	}

	/*
	 * Fill in cache info, if vp is NULL this is a "negative" cache entry.
	 * For negative entries, we have to record whether it is a whiteout.
	 * the whiteout flag is stored in the nc_vpid field which is
	 * otherwise unused.
	 */
	ncp->nc_vp = vp;
	ncp->nc_dvp = dvp;
	len = ncp->nc_nlen = cnp->cn_namelen;
	/* Copy the name and compute the hash in the same pass. */
	hash = 0;
	dp = ncp->nc_name;
	for (cp = cnp->cn_nameptr; len; len--, cp++, dp++)
		hash += (*dp = *cp);
	ncpp = NCHHASH(dvp, hash);
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
	/* First entry sourced from dvp: hold it (paired with vdrop in cache_zap). */
	if (LIST_EMPTY(&dvp->v_cache_src))
		vhold(dvp);
	LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
	if (vp) {
		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
	} else {
		TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	}
	/* Bound negative entries to 1/ncnegfactor of the cache: zap oldest. */
	if (numneg * ncnegfactor > numcache) {
		ncp = TAILQ_FIRST(&ncneg);
		cache_zap(ncp);
	}
}

/*
 * Name cache initialization, from vfs_init() when we are booting
 */
void
nchinit()
{

	TAILQ_INIT(&ncneg);
	nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
}

/*
 * Invalidate all entries to a particular vnode.
 *
 * Remove all entries in the namecache relating to this vnode and
 * change the v_id.  We take the v_id from a global counter, since
 * it becomes a handy sequence number in crash-dumps that way.
 * No valid vnode will ever have (v_id == 0).
 *
 * XXX: Only time and the size of v_id prevents this from failing:
 * XXX: In theory we should hunt down all (struct vnode*, v_id)
 * XXX: soft references and nuke them, at least on the global
 * XXX: v_id wraparound.  The period of resistance can be extended
 * XXX: by incrementing each vnodes v_id individually instead of
 * XXX: using the global v_id.
 */

void
cache_purge(vp)
	struct vnode *vp;
{
	static u_long nextid;

	while (!LIST_EMPTY(&vp->v_cache_src))
		cache_zap(LIST_FIRST(&vp->v_cache_src));
	while (!TAILQ_EMPTY(&vp->v_cache_dst))
		cache_zap(TAILQ_FIRST(&vp->v_cache_dst));

	/* Pick a fresh capability number, skipping 0 and the current one. */
	do
		nextid++;
	while (nextid == vp->v_id || !nextid);
	vp->v_id = nextid;
	vp->v_dd = vp;
	vp->v_ddid = 0;
}

/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
void
cache_purgevfs(mp)
	struct mount *mp;
{
	struct nchashhead *ncpp;
	struct namecache *ncp, *nnp;

	/* Scan hash tables for applicable entries */
	for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
		/* Fetch the successor first: cache_zap() unlinks ncp. */
		for (ncp = LIST_FIRST(ncpp); ncp != 0; ncp = nnp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (ncp->nc_dvp->v_mount == mp) {
				cache_zap(ncp);
			}
		}
	}
}

/*
 * Perform canonical checks and cache lookup and pass on to filesystem
 * through the vop_cachedlookup only if needed.
419 */ 420 421 int 422 vfs_cache_lookup(ap) 423 struct vop_lookup_args /* { 424 struct vnode *a_dvp; 425 struct vnode **a_vpp; 426 struct componentname *a_cnp; 427 } */ *ap; 428 { 429 struct vnode *dvp, *vp; 430 int lockparent; 431 int error; 432 struct vnode **vpp = ap->a_vpp; 433 struct componentname *cnp = ap->a_cnp; 434 struct ucred *cred = cnp->cn_cred; 435 int flags = cnp->cn_flags; 436 struct proc *p = cnp->cn_proc; 437 u_long vpid; /* capability number of vnode */ 438 439 *vpp = NULL; 440 dvp = ap->a_dvp; 441 lockparent = flags & LOCKPARENT; 442 443 if (dvp->v_type != VDIR) 444 return (ENOTDIR); 445 446 if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) && 447 (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME)) 448 return (EROFS); 449 450 error = VOP_ACCESS(dvp, VEXEC, cred, p); 451 452 if (error) 453 return (error); 454 455 error = cache_lookup(dvp, vpp, cnp); 456 457 if (!error) 458 return (VOP_CACHEDLOOKUP(dvp, vpp, cnp)); 459 460 if (error == ENOENT) 461 return (error); 462 463 vp = *vpp; 464 vpid = vp->v_id; 465 cnp->cn_flags &= ~PDIRUNLOCK; 466 if (dvp == vp) { /* lookup on "." */ 467 VREF(vp); 468 error = 0; 469 } else if (flags & ISDOTDOT) { 470 VOP_UNLOCK(dvp, 0, p); 471 cnp->cn_flags |= PDIRUNLOCK; 472 error = vget(vp, LK_EXCLUSIVE, p); 473 if (!error && lockparent && (flags & ISLASTCN)) { 474 if ((error = vn_lock(dvp, LK_EXCLUSIVE, p)) == 0) 475 cnp->cn_flags &= ~PDIRUNLOCK; 476 } 477 } else { 478 error = vget(vp, LK_EXCLUSIVE, p); 479 if (!lockparent || error || !(flags & ISLASTCN)) { 480 VOP_UNLOCK(dvp, 0, p); 481 cnp->cn_flags |= PDIRUNLOCK; 482 } 483 } 484 /* 485 * Check that the capability number did not change 486 * while we were waiting for the lock. 
487 */ 488 if (!error) { 489 if (vpid == vp->v_id) 490 return (0); 491 vput(vp); 492 if (lockparent && dvp != vp && (flags & ISLASTCN)) { 493 VOP_UNLOCK(dvp, 0, p); 494 cnp->cn_flags |= PDIRUNLOCK; 495 } 496 } 497 if (cnp->cn_flags & PDIRUNLOCK) { 498 error = vn_lock(dvp, LK_EXCLUSIVE, p); 499 if (error) 500 return (error); 501 cnp->cn_flags &= ~PDIRUNLOCK; 502 } 503 return (VOP_CACHEDLOOKUP(dvp, vpp, cnp)); 504 } 505 506 507 #ifndef _SYS_SYSPROTO_H_ 508 struct __getcwd_args { 509 u_char *buf; 510 u_int buflen; 511 }; 512 #endif 513 514 static int disablecwd; 515 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, ""); 516 517 static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls); 518 static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1); 519 static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2); 520 static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3); 521 static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4); 522 static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound); 523 int 524 __getcwd(p, uap) 525 struct proc *p; 526 struct __getcwd_args *uap; 527 { 528 char *bp, *buf; 529 int error, i, slash_prefixed; 530 struct filedesc *fdp; 531 struct namecache *ncp; 532 struct vnode *vp; 533 534 numcwdcalls++; 535 if (disablecwd) 536 return (ENODEV); 537 if (uap->buflen < 2) 538 return (EINVAL); 539 if (uap->buflen > MAXPATHLEN) 540 uap->buflen = MAXPATHLEN; 541 buf = bp = malloc(uap->buflen, M_TEMP, M_WAITOK); 542 bp += uap->buflen - 1; 543 *bp = '\0'; 544 fdp = p->p_fd; 545 slash_prefixed = 0; 546 for (vp = fdp->fd_cdir; vp != fdp->fd_rdir && vp != rootvnode;) { 547 if (vp->v_flag & VROOT) { 548 if (vp->v_mount == NULL) /* forced unmount */ 549 return (EBADF); 550 vp = vp->v_mount->mnt_vnodecovered; 551 continue; 552 } 553 if (vp->v_dd->v_id != vp->v_ddid) { 554 numcwdfail1++; 555 free(buf, M_TEMP); 556 return (ENOTDIR); 557 } 
558 ncp = TAILQ_FIRST(&vp->v_cache_dst); 559 if (!ncp) { 560 numcwdfail2++; 561 free(buf, M_TEMP); 562 return (ENOENT); 563 } 564 if (ncp->nc_dvp != vp->v_dd) { 565 numcwdfail3++; 566 free(buf, M_TEMP); 567 return (EBADF); 568 } 569 for (i = ncp->nc_nlen - 1; i >= 0; i--) { 570 if (bp == buf) { 571 numcwdfail4++; 572 free(buf, M_TEMP); 573 return (ENOMEM); 574 } 575 *--bp = ncp->nc_name[i]; 576 } 577 if (bp == buf) { 578 numcwdfail4++; 579 free(buf, M_TEMP); 580 return (ENOMEM); 581 } 582 *--bp = '/'; 583 slash_prefixed = 1; 584 vp = vp->v_dd; 585 } 586 if (!slash_prefixed) { 587 if (bp == buf) { 588 numcwdfail4++; 589 free(buf, M_TEMP); 590 return (ENOMEM); 591 } 592 *--bp = '/'; 593 } 594 numcwdfound++; 595 error = copyout(bp, uap->buf, strlen(bp) + 1); 596 free(buf, M_TEMP); 597 return (error); 598 } 599 600 /* 601 * Thus begins the fullpath magic. 602 */ 603 604 #undef STATNODE 605 #define STATNODE(name) \ 606 static u_int name; \ 607 SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "") 608 609 static int disablefullpath; 610 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, 611 &disablefullpath, 0, ""); 612 613 STATNODE(numfullpathcalls); 614 STATNODE(numfullpathfail1); 615 STATNODE(numfullpathfail2); 616 STATNODE(numfullpathfail3); 617 STATNODE(numfullpathfail4); 618 STATNODE(numfullpathfound); 619 620 int 621 textvp_fullpath(struct proc *p, char **retbuf, char **retfreebuf) { 622 char *bp, *buf; 623 int i, slash_prefixed; 624 struct filedesc *fdp; 625 struct namecache *ncp; 626 struct vnode *vp, *textvp; 627 628 numfullpathcalls++; 629 if (disablefullpath) 630 return (ENODEV); 631 textvp = p->p_textvp; 632 if (textvp == NULL) 633 return (EINVAL); 634 buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK); 635 bp = buf + MAXPATHLEN - 1; 636 *bp = '\0'; 637 fdp = p->p_fd; 638 slash_prefixed = 0; 639 for (vp = textvp; vp != fdp->fd_rdir && vp != rootvnode;) { 640 if (vp->v_flag & VROOT) { 641 if (vp->v_mount == NULL) { /* forced unmount */ 642 
free(buf, M_TEMP); 643 return (EBADF); 644 } 645 vp = vp->v_mount->mnt_vnodecovered; 646 continue; 647 } 648 if (vp != textvp && vp->v_dd->v_id != vp->v_ddid) { 649 numfullpathfail1++; 650 free(buf, M_TEMP); 651 return (ENOTDIR); 652 } 653 ncp = TAILQ_FIRST(&vp->v_cache_dst); 654 if (!ncp) { 655 numfullpathfail2++; 656 free(buf, M_TEMP); 657 return (ENOENT); 658 } 659 if (vp != textvp && ncp->nc_dvp != vp->v_dd) { 660 numfullpathfail3++; 661 free(buf, M_TEMP); 662 return (EBADF); 663 } 664 for (i = ncp->nc_nlen - 1; i >= 0; i--) { 665 if (bp == buf) { 666 numfullpathfail4++; 667 free(buf, M_TEMP); 668 return (ENOMEM); 669 } 670 *--bp = ncp->nc_name[i]; 671 } 672 if (bp == buf) { 673 numfullpathfail4++; 674 free(buf, M_TEMP); 675 return (ENOMEM); 676 } 677 *--bp = '/'; 678 slash_prefixed = 1; 679 vp = ncp->nc_dvp; 680 } 681 if (!slash_prefixed) { 682 if (bp == buf) { 683 numfullpathfail4++; 684 free(buf, M_TEMP); 685 return (ENOMEM); 686 } 687 *--bp = '/'; 688 } 689 numfullpathfound++; 690 *retbuf = bp; 691 *retfreebuf = buf; 692 return (0); 693 } 694