/* xref: /freebsd/sys/kern/vfs_hash.c (revision 63d1fd5970ec814904aa0f4580b10a0d302d08b2) */
/*-
 * Copyright (c) 2005 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

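/*
 * General per-mount vnode hash table.  Filesystems use this facility to
 * map a (mount point, hash) pair -- typically derived from an inode
 * number -- to an in-core vnode, so that at most one vnode exists for a
 * given on-disk object at any time.
 */
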
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/rwlock.h>
#include <sys/vnode.h>

static MALLOC_DEFINE(M_VFS_HASH, "vfs_hash", "VFS hash table");

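/*
 * The hash table, the mask for its size, and the lock protecting them.
 * vfs_hash_side holds vnodes that lost an insertion race in
 * vfs_hash_insert(); parking them there keeps their hash-list linkage
 * valid until they are reclaimed.
 */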
static LIST_HEAD(vfs_hash_head, vnode)	*vfs_hash_tbl;
static LIST_HEAD(,vnode)		vfs_hash_side;
static u_long				vfs_hash_mask;
static struct rwlock			vfs_hash_lock;

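/*
 * Allocate the hash table, sized according to desiredvnodes, and set up
 * the lock and the side list.
 */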
static void
vfs_hashinit(void *dummy __unused)
{

	vfs_hash_tbl = hashinit(desiredvnodes, M_VFS_HASH, &vfs_hash_mask);
	rw_init(&vfs_hash_lock, "vfs hash");
	LIST_INIT(&vfs_hash_side);
}

/* Must be SI_ORDER_SECOND so desiredvnodes is available */
SYSINIT(vfs_hash, SI_SUB_VFS, SI_ORDER_SECOND, vfs_hashinit, NULL);

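/*
 * Return the vnode's hash value combined with its mount point's seed.
 */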
u_int
vfs_hash_index(struct vnode *vp)
{

	return (vp->v_hash + vp->v_mount->mnt_hashseed);
}

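/*
 * Map a (mount point, hash) pair to its bucket in the hash table; the
 * per-mount seed spreads different mounts across the table.
 */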
static struct vfs_hash_head *
vfs_hash_bucket(const struct mount *mp, u_int hash)
{

	return (&vfs_hash_tbl[(hash + mp->mnt_hashseed) & vfs_hash_mask]);
}

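/*
 * Look up a vnode by (mount point, hash).  An optional comparison
 * function "fn" may refine the match; it must return 0 for a matching
 * vnode.  On a hit the vnode is returned in *vpp, locked as requested by
 * "flags"; on a miss *vpp is set to NULL.  If the matched vnode is
 * doomed while being acquired (vget() returns ENOENT), the lookup is
 * retried unless LK_NOWAIT was specified.
 */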
int
vfs_hash_get(const struct mount *mp, u_int hash, int flags, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg)
{
	struct vnode *vp;
	int error;

	while (1) {
		rw_rlock(&vfs_hash_lock);
		LIST_FOREACH(vp, vfs_hash_bucket(mp, hash), v_hashlist) {
			if (vp->v_hash != hash)
				continue;
			if (vp->v_mount != mp)
				continue;
			if (fn != NULL && fn(vp, arg))
				continue;
			vhold(vp);
			rw_runlock(&vfs_hash_lock);
			error = vget(vp, flags | LK_VNHELD, td);
			if (error == ENOENT && (flags & LK_NOWAIT) == 0)
				break;
			if (error)
				return (error);
			*vpp = vp;
			return (0);
		}
		if (vp == NULL) {
			rw_runlock(&vfs_hash_lock);
			*vpp = NULL;
			return (0);
		}
	}
}

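/*
 * Like vfs_hash_get(), but return the vnode held only by a reference
 * (vref()) instead of locked.
 */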
void
vfs_hash_ref(const struct mount *mp, u_int hash, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg)
{
	struct vnode *vp;

	while (1) {
		rw_rlock(&vfs_hash_lock);
		LIST_FOREACH(vp, vfs_hash_bucket(mp, hash), v_hashlist) {
			if (vp->v_hash != hash)
				continue;
			if (vp->v_mount != mp)
				continue;
			if (fn != NULL && fn(vp, arg))
				continue;
			vhold(vp);
			rw_runlock(&vfs_hash_lock);
			vref(vp);
			vdrop(vp);
			*vpp = vp;
			return;
		}
		if (vp == NULL) {
			rw_runlock(&vfs_hash_lock);
			*vpp = NULL;
			return;
		}
	}
}

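/*
 * Remove a vnode from the hash table, typically called from the
 * filesystem's vnode reclaim path.
 */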
void
vfs_hash_remove(struct vnode *vp)
{

	rw_wlock(&vfs_hash_lock);
	LIST_REMOVE(vp, v_hashlist);
	rw_wunlock(&vfs_hash_lock);
}

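/*
 * Insert vnode "vp" into the hash table under "hash", unless a vnode
 * with the same (mount point, hash) identity is already present.  If an
 * existing vnode wins the race, the new vnode is parked on the side list
 * and vput(), and the winner is returned locked in *vpp (or the vget()
 * error is returned); otherwise vp is inserted and *vpp is set to NULL.
 */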
int
vfs_hash_insert(struct vnode *vp, u_int hash, int flags, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg)
{
	struct vnode *vp2;
	int error;

	*vpp = NULL;
	while (1) {
		rw_wlock(&vfs_hash_lock);
		LIST_FOREACH(vp2,
		    vfs_hash_bucket(vp->v_mount, hash), v_hashlist) {
			if (vp2->v_hash != hash)
				continue;
			if (vp2->v_mount != vp->v_mount)
				continue;
			if (fn != NULL && fn(vp2, arg))
				continue;
			vhold(vp2);
			rw_wunlock(&vfs_hash_lock);
			error = vget(vp2, flags | LK_VNHELD, td);
			if (error == ENOENT && (flags & LK_NOWAIT) == 0)
				break;
			rw_wlock(&vfs_hash_lock);
			LIST_INSERT_HEAD(&vfs_hash_side, vp, v_hashlist);
			rw_wunlock(&vfs_hash_lock);
			vput(vp);
			if (!error)
				*vpp = vp2;
			return (error);
		}
		if (vp2 == NULL)
			break;

	}
	vp->v_hash = hash;
	LIST_INSERT_HEAD(vfs_hash_bucket(vp->v_mount, hash), vp, v_hashlist);
	rw_wunlock(&vfs_hash_lock);
	return (0);
}

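/*
 * Move a vnode to a different hash chain after its hash value changes.
 */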
void
vfs_hash_rehash(struct vnode *vp, u_int hash)
{

	rw_wlock(&vfs_hash_lock);
	LIST_REMOVE(vp, v_hashlist);
	LIST_INSERT_HEAD(vfs_hash_bucket(vp->v_mount, hash), vp, v_hashlist);
	vp->v_hash = hash;
	rw_wunlock(&vfs_hash_lock);
}

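/*
 * Resize the hash table to match a new maxvnodes value, moving all
 * vnodes from the old table to the new one.
 */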
void
vfs_hash_changesize(int newmaxvnodes)
{
	struct vfs_hash_head *vfs_hash_newtbl, *vfs_hash_oldtbl;
	u_long vfs_hash_newmask, vfs_hash_oldmask;
	struct vnode *vp;
	int i;

	vfs_hash_newtbl = hashinit(newmaxvnodes, M_VFS_HASH,
		&vfs_hash_newmask);
	/* If same hash table size, nothing to do */
	if (vfs_hash_mask == vfs_hash_newmask) {
		free(vfs_hash_newtbl, M_VFS_HASH);
		return;
	}
	/*
	 * Move everything from the old hash table to the new table.
	 * None of the vnodes in the table can be recycled because to
	 * do so, they have to be removed from the hash table.
	 */
	rw_wlock(&vfs_hash_lock);
	vfs_hash_oldtbl = vfs_hash_tbl;
	vfs_hash_oldmask = vfs_hash_mask;
	vfs_hash_tbl = vfs_hash_newtbl;
	vfs_hash_mask = vfs_hash_newmask;
	for (i = 0; i <= vfs_hash_oldmask; i++) {
		while ((vp = LIST_FIRST(&vfs_hash_oldtbl[i])) != NULL) {
			LIST_REMOVE(vp, v_hashlist);
			LIST_INSERT_HEAD(
			    vfs_hash_bucket(vp->v_mount, vp->v_hash),
			    vp, v_hashlist);
		}
	}
	rw_wunlock(&vfs_hash_lock);
	free(vfs_hash_oldtbl, M_VFS_HASH);
}