xref: /freebsd/sys/kern/vfs_cache.c (revision ae83180158c4c937f170e31eff311b18c0286a93)
/*
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>

/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 */

struct	namecache {
	LIST_ENTRY(namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
	struct	vnode *nc_dvp;		/* vnode of parent of name */
	struct	vnode *nc_vp;		/* vnode the name refers to */
	u_char	nc_flag;		/* flag bits */
	u_char	nc_nlen;		/* length of name */
	char	nc_name[0];		/* segment name */
};
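
/*
 * Note that nc_name is a zero-length array: cache_enter() allocates
 * sizeof(struct namecache) + cn_namelen bytes, so the name is stored
 * inline, immediately past the end of the structure.
 */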

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  The cache is indexed by a hash value
 * obtained from (vp, name), where vp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry (i.e., for a name that is known NOT to
 * exist), the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 */
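
/*
 * Illustrative sketch (not part of the original file): how the hash key
 * for a (directory, name) pair is derived, mirroring the computations in
 * cache_lookup() and cache_enter() below.  The helper name is
 * hypothetical and shown for exposition only.
 */
#if 0	/* example only */
static u_int32_t
cache_hash_example(struct vnode *dvp, const char *name, int namelen)
{
	u_int32_t hash;

	/* FNV-1 over the name, then folded with the directory's v_id. */
	hash = fnv_32_buf(name, namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&dvp->v_id, sizeof(dvp->v_id), hash);
	return (hash);
}
#endif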

/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash) \
	(&nchashtbl[(hash) & nchash])
static LIST_HEAD(nchashhead, namecache) *nchashtbl;	/* Hash Table */
static TAILQ_HEAD(, namecache) ncneg;	/* Negative entry LRU list */
static u_long	nchash;			/* size of hash table - 1 (mask) */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_ULONG(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
static u_long	numneg;			/* number of negative cache entries */
SYSCTL_ULONG(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_ULONG(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
static u_long	numcachehv;		/* number of cache entries with vnodes held */
SYSCTL_ULONG(_debug, OID_AUTO, numcachehv, CTLFLAG_RD, &numcachehv, 0, "");
#if 0
static u_long	numcachepl;		/* number of cache purges for leaf entries */
SYSCTL_ULONG(_debug, OID_AUTO, numcachepl, CTLFLAG_RD, &numcachepl, 0, "");
#endif
struct	nchstats nchstats;		/* cache effectiveness statistics */

static int	doingcache = 1;		/* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, "");

/* Export size information to userland */
SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

SYSCTL_OPAQUE(_vfs_cache, OID_AUTO, nchstats, CTLFLAG_RD, &nchstats,
	sizeof(nchstats), "LU", "VFS cache effectiveness statistics");

static void cache_zap(struct namecache *ncp);

static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

/*
 * Flags in namecache.nc_flag
 */
#define NCF_WHITE	1

/*
 * Grab an atomic snapshot of the name cache hash chain lengths
 */
SYSCTL_NODE(_debug, OID_AUTO, hashstat, CTLFLAG_RW, NULL, "hash table stats");

static int
sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int n_nchash;
	int count;

	n_nchash = nchash + 1;	/* nchash is max index, not count */
	if (!req->oldptr)
		return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));

	/* Scan hash tables for applicable entries */
	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
		count = 0;
		LIST_FOREACH(ncp, ncpp, nc_hash) {
			count++;
		}
		error = SYSCTL_OUT(req, &count, sizeof(count));
		if (error)
			return (error);
	}
	return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, rawnchash, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_debug_hashstat_rawnchash, "S,int", "nchash chain lengths");

static int
sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
{
	int error;
	struct nchashhead *ncpp;
	struct namecache *ncp;
	int n_nchash;
	int count, maxlength, used, pct;

	if (!req->oldptr)
		return SYSCTL_OUT(req, 0, 4 * sizeof(int));

	n_nchash = nchash + 1;	/* nchash is max index, not count */
	used = 0;
	maxlength = 0;

	/* Scan hash tables for applicable entries */
	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
		count = 0;
		LIST_FOREACH(ncp, ncpp, nc_hash) {
			count++;
		}
		if (count)
			used++;
		if (maxlength < count)
			maxlength = count;
	}
	n_nchash = nchash + 1;
	pct = (used * 100 * 100) / n_nchash;
	error = SYSCTL_OUT(req, &n_nchash, sizeof(n_nchash));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &used, sizeof(used));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &maxlength, sizeof(maxlength));
	if (error)
		return (error);
	error = SYSCTL_OUT(req, &pct, sizeof(pct));
	if (error)
		return (error);
	return (0);
}
SYSCTL_PROC(_debug_hashstat, OID_AUTO, nchash, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_debug_hashstat_nchash, "I", "nchash chain lengths");
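
/*
 * Example: "sysctl debug.hashstat.nchash" returns four integers: the
 * table size, the number of buckets in use, the longest chain, and the
 * bucket utilization in hundredths of a percent (the "* 100 * 100"
 * above keeps two decimal places in integer arithmetic).
 */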

/*
 * Delete an entry from its hash list, unlink it from its vnode lists,
 * and free it.
 */
static void
cache_zap(ncp)
	struct namecache *ncp;
{
	LIST_REMOVE(ncp, nc_hash);
	LIST_REMOVE(ncp, nc_src);
	if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src)) {
		vdrop(ncp->nc_dvp);
		numcachehv--;
	}
	if (ncp->nc_vp) {
		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
	} else {
		TAILQ_REMOVE(&ncneg, ncp, nc_dst);
		numneg--;
	}
	numcache--;
	free(ncp, M_VFSCACHE);
}

/*
 * cache_leaf_test()
 *
 *      Test whether this (directory) vnode's namei cache entry contains
 *      subdirectories or not.  Used to determine whether the directory is
 *      a leaf in the namei cache or not.  Note: the directory may still
 *      contain files in the namei cache.
 *
 *      Returns 0 if the directory is a leaf, -1 if it isn't.
 */
int
cache_leaf_test(struct vnode *vp)
{
	struct namecache *ncpc;

	LIST_FOREACH(ncpc, &vp->v_cache_src, nc_src) {
		if (ncpc->nc_vp != NULL && ncpc->nc_vp->v_type == VDIR)
			return (-1);
	}
	return (0);
}

/*
 * Lookup an entry in the cache
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp pointing to the name of the entry being sought. If the lookup
 * succeeds, the vnode is returned in *vpp, and a status of -1 is
 * returned. If the lookup determines that the name does not exist
 * (negative caching), a status of ENOENT is returned. If the lookup
 * fails, a status of zero is returned.
 */
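
/*
 * A minimal caller sketch, assuming the return conventions documented
 * above; vfs_cache_lookup() below is the real in-tree consumer, which
 * also handles locking.
 */
#if 0	/* example only */
	error = cache_lookup(dvp, vpp, cnp);
	if (error == -1)
		error = 0;	/* positive hit: *vpp holds the vnode */
	else if (error == 0)
		error = VOP_CACHEDLOOKUP(dvp, vpp, cnp);	/* miss */
	/* else error == ENOENT: negative hit, name known not to exist */
#endif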

int
cache_lookup(dvp, vpp, cnp)
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	struct namecache *ncp;
	u_int32_t hash;

	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (0);
	}

	numcalls++;

	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			*vpp = dvp;
			dothits++;
			return (-1);
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			dotdothits++;
			if (dvp->v_dd->v_id != dvp->v_ddid ||
			    (cnp->cn_flags & MAKEENTRY) == 0) {
				dvp->v_ddid = 0;
				return (0);
			}
			*vpp = dvp->v_dd;
			return (-1);
		}
	}

	hash = fnv_32_buf(cnp->cn_nameptr, cnp->cn_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&dvp->v_id, sizeof(dvp->v_id), hash);
	LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
		numchecks++;
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (ncp == 0) {
		if ((cnp->cn_flags & MAKEENTRY) == 0) {
			nummisszap++;
		} else {
			nummiss++;
		}
		nchstats.ncs_miss++;
		return (0);
	}

	/* We don't want to have an entry, so dump it */
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		numposzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		return (0);
	}

	/* We found a "positive" match, return the vnode */
	if (ncp->nc_vp) {
		numposhits++;
		nchstats.ncs_goodhits++;
		*vpp = ncp->nc_vp;
		return (-1);
	}

	/* We found a negative match, and want to create it, so purge */
	if (cnp->cn_nameiop == CREATE) {
		numnegzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		return (0);
	}

	numneghits++;
	/*
	 * We found a "negative" match; ENOENT notifies the caller of this.
	 * The nc_flag field records whether this is a whiteout.
	 */
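	/*
	 * Move the entry to the tail of the negative-entry LRU list so it
	 * is the last candidate for recycling; cache_enter() reclaims from
	 * the head when negative entries grow too numerous.
	 */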
	TAILQ_REMOVE(&ncneg, ncp, nc_dst);
	TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	nchstats.ncs_neghits++;
	if (ncp->nc_flag & NCF_WHITE)
		cnp->cn_flags |= ISWHITEOUT;
	return (ENOENT);
}

/*
 * Add an entry to the cache.
 */
void
cache_enter(dvp, vp, cnp)
	struct vnode *dvp;
	struct vnode *vp;
	struct componentname *cnp;
{
	struct namecache *ncp;
	struct nchashhead *ncpp;
	u_int32_t hash;
	int len;

	if (!doingcache)
		return;

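	/*
	 * "." is never entered into the cache, and ".." is recorded
	 * directly in the directory vnode (v_dd/v_ddid) rather than as a
	 * namecache entry.
	 */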
	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			return;
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			if (vp) {
				dvp->v_dd = vp;
				dvp->v_ddid = vp->v_id;
			} else {
				dvp->v_dd = dvp;
				dvp->v_ddid = 0;
			}
			return;
		}
	}

	ncp = (struct namecache *)
		malloc(sizeof *ncp + cnp->cn_namelen, M_VFSCACHE, M_WAITOK);
	bzero((char *)ncp, sizeof *ncp);
	numcache++;
	if (!vp) {
		numneg++;
		ncp->nc_flag = cnp->cn_flags & ISWHITEOUT ? NCF_WHITE : 0;
	} else if (vp->v_type == VDIR) {
		vp->v_dd = dvp;
		vp->v_ddid = dvp->v_id;
	}

	/*
	 * Fill in cache info.  If vp is NULL this is a "negative" cache
	 * entry; for negative entries we record whether the name is a
	 * whiteout in the nc_flag field, which is otherwise unused.
	 */
	ncp->nc_vp = vp;
	ncp->nc_dvp = dvp;
	len = ncp->nc_nlen = cnp->cn_namelen;
	hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT);
	bcopy(cnp->cn_nameptr, ncp->nc_name, len);
	hash = fnv_32_buf(&dvp->v_id, sizeof(dvp->v_id), hash);
	ncpp = NCHHASH(hash);
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
	if (LIST_EMPTY(&dvp->v_cache_src)) {
		vhold(dvp);
		numcachehv++;
	}
	LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
	if (vp) {
		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
	} else {
		TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	}
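	/*
	 * Keep negative entries to at most 1/ncnegfactor of the whole
	 * cache by recycling the coldest one from the head of the LRU
	 * list.
	 */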
	if (numneg * ncnegfactor > numcache) {
		ncp = TAILQ_FIRST(&ncneg);
		cache_zap(ncp);
	}
}

/*
 * Name cache initialization, from vfs_init() when we are booting
 */
static void
nchinit(void *dummy __unused)
{

	TAILQ_INIT(&ncneg);
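	/*
	 * hashinit() sizes the table to a power of two and stores the
	 * bucket mask in nchash, hence the "& nchash" in NCHHASH().
	 */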
	nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL)

/*
 * Invalidate all entries to a particular vnode.
 *
 * Remove all entries in the namecache relating to this vnode and
 * change the v_id.  We take the v_id from a global counter, since
 * it becomes a handy sequence number in crash-dumps that way.
 * No valid vnode will ever have (v_id == 0).
 *
 * XXX: Only time and the size of v_id prevent this from failing:
 * XXX: In theory we should hunt down all (struct vnode*, v_id)
 * XXX: soft references and nuke them, at least on the global
 * XXX: v_id wraparound.  The period of resistance can be extended
 * XXX: by incrementing each vnode's v_id individually instead of
 * XXX: using the global v_id.
 */

void
cache_purge(vp)
	struct vnode *vp;
{
	static u_long nextid;

	while (!LIST_EMPTY(&vp->v_cache_src))
		cache_zap(LIST_FIRST(&vp->v_cache_src));
	while (!TAILQ_EMPTY(&vp->v_cache_dst))
		cache_zap(TAILQ_FIRST(&vp->v_cache_dst));

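	/* Pick a new v_id, skipping zero and the value we are replacing. */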
	do
		nextid++;
	while (nextid == vp->v_id || !nextid);
	vp->v_id = nextid;
	vp->v_dd = vp;
	vp->v_ddid = 0;
}

/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
void
cache_purgevfs(mp)
	struct mount *mp;
{
	struct nchashhead *ncpp;
	struct namecache *ncp, *nnp;

	/* Scan hash tables for applicable entries */
	for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
		for (ncp = LIST_FIRST(ncpp); ncp != 0; ncp = nnp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (ncp->nc_dvp->v_mount == mp) {
				cache_zap(ncp);
			}
		}
	}
}

/*
 * Perform canonical checks and cache lookup and pass on to filesystem
 * through the vop_cachedlookup only if needed.
 */

int
vfs_cache_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct vnode *dvp, *vp;
	int lockparent;
	int error;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	int flags = cnp->cn_flags;
	struct thread *td = cnp->cn_thread;
	u_long vpid;	/* capability number of vnode */

	*vpp = NULL;
	dvp = ap->a_dvp;
	lockparent = flags & LOCKPARENT;

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	error = VOP_ACCESS(dvp, VEXEC, cred, td);

	if (error)
		return (error);

	error = cache_lookup(dvp, vpp, cnp);

#ifdef LOOKUP_SHARED
	if (!error) {
		/* We do this because the rest of the system now expects to get
		 * a shared lock, which is later upgraded if LOCKSHARED is not
		 * set.  We have so many cases here because of bugs that yield
		 * inconsistent lock states.  This all badly needs to be fixed.
		 */
		error = VOP_CACHEDLOOKUP(dvp, vpp, cnp);
		if (!error) {
			int flock;

			flock = VOP_ISLOCKED(*vpp, td);
			if (flock != LK_EXCLUSIVE) {
				if (flock == 0) {
					if ((flags & ISLASTCN) &&
					    (flags & LOCKSHARED))
						VOP_LOCK(*vpp, LK_SHARED, td);
					else
						VOP_LOCK(*vpp, LK_EXCLUSIVE, td);
				}
			} else if ((flags & ISLASTCN) && (flags & LOCKSHARED))
				VOP_LOCK(*vpp, LK_DOWNGRADE, td);
		}
		return (error);
	}
#else
	if (!error)
		return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
#endif

	if (error == ENOENT)
		return (error);

	vp = *vpp;
	vpid = vp->v_id;
	cnp->cn_flags &= ~PDIRUNLOCK;
	if (dvp == vp) {   /* lookup on "." */
		VREF(vp);
		error = 0;
	} else if (flags & ISDOTDOT) {
		VOP_UNLOCK(dvp, 0, td);
		cnp->cn_flags |= PDIRUNLOCK;
#ifdef LOOKUP_SHARED
		if ((flags & ISLASTCN) && (flags & LOCKSHARED))
			error = vget(vp, LK_SHARED, td);
		else
			error = vget(vp, LK_EXCLUSIVE, td);
#else
		error = vget(vp, LK_EXCLUSIVE, td);
#endif

		if (!error && lockparent && (flags & ISLASTCN)) {
			if ((error = vn_lock(dvp, LK_EXCLUSIVE, td)) == 0)
				cnp->cn_flags &= ~PDIRUNLOCK;
		}
	} else {
#ifdef LOOKUP_SHARED
		if ((flags & ISLASTCN) && (flags & LOCKSHARED))
			error = vget(vp, LK_SHARED, td);
		else
			error = vget(vp, LK_EXCLUSIVE, td);
#else
		error = vget(vp, LK_EXCLUSIVE, td);
#endif
		if (!lockparent || error || !(flags & ISLASTCN)) {
			VOP_UNLOCK(dvp, 0, td);
			cnp->cn_flags |= PDIRUNLOCK;
		}
	}
	/*
	 * Check that the capability number did not change
	 * while we were waiting for the lock.
	 */
	if (!error) {
		if (vpid == vp->v_id)
			return (0);
		vput(vp);
		if (lockparent && dvp != vp && (flags & ISLASTCN)) {
			VOP_UNLOCK(dvp, 0, td);
			cnp->cn_flags |= PDIRUNLOCK;
		}
	}
	if (cnp->cn_flags & PDIRUNLOCK) {
		error = vn_lock(dvp, LK_EXCLUSIVE, td);
		if (error)
			return (error);
		cnp->cn_flags &= ~PDIRUNLOCK;
	}
#ifdef LOOKUP_SHARED
	error = VOP_CACHEDLOOKUP(dvp, vpp, cnp);

	if (!error) {
		int flock = 0;

		flock = VOP_ISLOCKED(*vpp, td);
		if (flock != LK_EXCLUSIVE) {
			if (flock == 0) {
				if ((flags & ISLASTCN) && (flags & LOCKSHARED))
					VOP_LOCK(*vpp, LK_SHARED, td);
				else
					VOP_LOCK(*vpp, LK_EXCLUSIVE, td);
			}
		} else if ((flags & ISLASTCN) && (flags & LOCKSHARED))
			VOP_LOCK(*vpp, LK_DOWNGRADE, td);
	}

	return (error);
#else
	return (VOP_CACHEDLOOKUP(dvp, vpp, cnp));
#endif
}


#ifndef _SYS_SYSPROTO_H_
struct  __getcwd_args {
	u_char	*buf;
	u_int	buflen;
};
#endif

/*
 * XXX All of these sysctls would probably be more productive dead.
 */
static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
   "Disable the getcwd syscall");

/* Various statistics for the getcwd syscall */
static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);

/* Implementation of the getcwd syscall */
int
__getcwd(td, uap)
	struct thread *td;
	struct __getcwd_args *uap;
{
	char *bp, *buf;
	int error, i, slash_prefixed;
	struct filedesc *fdp;
	struct namecache *ncp;
	struct vnode *vp;

	numcwdcalls++;
	if (disablecwd)
		return (ENODEV);
	if (uap->buflen < 2)
		return (EINVAL);
	if (uap->buflen > MAXPATHLEN)
		uap->buflen = MAXPATHLEN;
	buf = bp = malloc(uap->buflen, M_TEMP, M_WAITOK);
	bp += uap->buflen - 1;
	*bp = '\0';
	fdp = td->td_proc->p_fd;
	slash_prefixed = 0;
	FILEDESC_LOCK(fdp);
	mp_fixme("No vnode locking done!");
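	/*
	 * Walk from the current directory up toward the process's root
	 * (or the global root), prepending one name component per
	 * iteration; the path is assembled backwards from the end of the
	 * buffer.  Each step depends on the v_dd/v_ddid back-pointers and
	 * the name cache being intact, so any stale link aborts the walk.
	 */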
	for (vp = fdp->fd_cdir; vp != fdp->fd_rdir && vp != rootvnode;) {
		if (vp->v_vflag & VV_ROOT) {
			if (vp->v_mount == NULL) {	/* forced unmount */
				FILEDESC_UNLOCK(fdp);
				free(buf, M_TEMP);
				return (EBADF);
			}
			vp = vp->v_mount->mnt_vnodecovered;
			continue;
		}
		if (vp->v_dd->v_id != vp->v_ddid) {
			FILEDESC_UNLOCK(fdp);
			numcwdfail1++;
			free(buf, M_TEMP);
			return (ENOTDIR);
		}
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (!ncp) {
			FILEDESC_UNLOCK(fdp);
			numcwdfail2++;
			free(buf, M_TEMP);
			return (ENOENT);
		}
		if (ncp->nc_dvp != vp->v_dd) {
			FILEDESC_UNLOCK(fdp);
			numcwdfail3++;
			free(buf, M_TEMP);
			return (EBADF);
		}
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				FILEDESC_UNLOCK(fdp);
				numcwdfail4++;
				free(buf, M_TEMP);
				return (ENOMEM);
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			FILEDESC_UNLOCK(fdp);
			numcwdfail4++;
			free(buf, M_TEMP);
			return (ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;
		vp = vp->v_dd;
	}
	FILEDESC_UNLOCK(fdp);
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfail4++;
			free(buf, M_TEMP);
			return (ENOMEM);
		}
		*--bp = '/';
	}
	numcwdfound++;
	error = copyout(bp, uap->buf, strlen(bp) + 1);
	free(buf, M_TEMP);
	return (error);
}

/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name)							\
	static u_int name;						\
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW, &disablefullpath, 0,
	"Disable the vn_fullpath function");

STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);

/*
 * Retrieve the full filesystem path that corresponds to a vnode from the
 * name cache (if available).
 */
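/*
 * A minimal usage sketch, assuming a caller that already holds a locked
 * vnode; the variable names are hypothetical.  On success *retbuf points
 * into the allocation returned via *freebuf, which the caller must free.
 */
#if 0	/* example only */
	char *fullpath, *freepath;

	if (vn_fullpath(td, vp, &fullpath, &freepath) == 0) {
		printf("path: %s\n", fullpath);
		free(freepath, M_TEMP);
	}
#endif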
int
vn_fullpath(struct thread *td, struct vnode *vn, char **retbuf, char **freebuf)
{
	char *bp, *buf;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct namecache *ncp;
	struct vnode *vp;

	numfullpathcalls++;
	if (disablefullpath)
		return (ENODEV);
	if (vn == NULL)
		return (EINVAL);
	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	fdp = td->td_proc->p_fd;
	slash_prefixed = 0;
	FILEDESC_LOCK(fdp);
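	/*
	 * Same backwards walk as __getcwd() above, but starting from an
	 * arbitrary vnode rather than the current working directory.
	 */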
	for (vp = vn; vp != fdp->fd_rdir && vp != rootvnode;) {
		ASSERT_VOP_LOCKED(vp, "vn_fullpath");
		if (vp->v_vflag & VV_ROOT) {
			if (vp->v_mount == NULL) {	/* forced unmount */
				FILEDESC_UNLOCK(fdp);
				free(buf, M_TEMP);
				return (EBADF);
			}
			vp = vp->v_mount->mnt_vnodecovered;
			continue;
		}
		if (vp != vn && vp->v_dd->v_id != vp->v_ddid) {
			FILEDESC_UNLOCK(fdp);
			numfullpathfail1++;
			free(buf, M_TEMP);
			return (ENOTDIR);
		}
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (!ncp) {
			FILEDESC_UNLOCK(fdp);
			numfullpathfail2++;
			free(buf, M_TEMP);
			return (ENOENT);
		}
		if (vp != vn && ncp->nc_dvp != vp->v_dd) {
			FILEDESC_UNLOCK(fdp);
			numfullpathfail3++;
			free(buf, M_TEMP);
			return (EBADF);
		}
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				FILEDESC_UNLOCK(fdp);
				numfullpathfail4++;
				free(buf, M_TEMP);
				return (ENOMEM);
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			FILEDESC_UNLOCK(fdp);
			numfullpathfail4++;
			free(buf, M_TEMP);
			return (ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;
		vp = ncp->nc_dvp;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			FILEDESC_UNLOCK(fdp);
			numfullpathfail4++;
			free(buf, M_TEMP);
			return (ENOMEM);
		}
		*--bp = '/';
	}
	FILEDESC_UNLOCK(fdp);
	numfullpathfound++;
	*retbuf = bp;
	*freebuf = buf;
	return (0);
}