xref: /freebsd/sys/kern/vfs_hash.c (revision 4db78cacdee1b6f3b7880eb8c5560e8edaf91698)
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/rwlock.h>
#include <sys/vnode.h>

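/*
 * This file implements a single global hash table that file systems use to
 * map a (mount point, filesystem-chosen hash value) pair, typically derived
 * from an inode number, to an in-memory vnode.  All buckets are protected
 * by one global rwlock, taken shared for lookups and exclusive for insert,
 * remove, rehash and resize operations.
 */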
static MALLOC_DEFINE(M_VFS_HASH, "vfs_hash", "VFS hash table");

static LIST_HEAD(vfs_hash_head, vnode)	*vfs_hash_tbl;
static LIST_HEAD(,vnode)		vfs_hash_side;
static u_long				vfs_hash_mask;
static struct rwlock			vfs_hash_lock;

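/*
 * Allocate the hash table, sized from desiredvnodes, and initialize the
 * lock and the side list used by vfs_hash_insert() for vnodes that lose
 * an insertion race.
 */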
static void
vfs_hashinit(void *dummy __unused)
{

	vfs_hash_tbl = hashinit(desiredvnodes, M_VFS_HASH, &vfs_hash_mask);
	rw_init(&vfs_hash_lock, "vfs hash");
	LIST_INIT(&vfs_hash_side);
}

/* Must be SI_ORDER_SECOND so desiredvnodes is available */
SYSINIT(vfs_hash, SI_SUB_VFS, SI_ORDER_SECOND, vfs_hashinit, NULL);

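/*
 * Return a per-vnode hash value: the filesystem-supplied v_hash combined
 * with the mount point's random seed.
 */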
u_int
vfs_hash_index(struct vnode *vp)
{

	return (vp->v_hash + vp->v_mount->mnt_hashseed);
}

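/*
 * Map a (mount point, hash) pair to its bucket in the table.
 */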
static struct vfs_hash_head *
vfs_hash_bucket(const struct mount *mp, u_int hash)
{

	return (&vfs_hash_tbl[(hash + mp->mnt_hashseed) & vfs_hash_mask]);
}

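/*
 * Look up the vnode with the given hash on the given mount point.  The
 * optional comparison function fn is called with arg for each candidate
 * and should return 0 to accept it.  On a hit the vnode is held, the
 * table lock is dropped and the vnode is locked with vget() using the
 * caller's flags; if the vnode is recycled under us (vget() returns
 * ENOENT) the lookup is retried unless LK_NOWAIT was given.  On success
 * *vpp is set to the locked vnode, or to NULL if there was no match.
 */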
int
vfs_hash_get(const struct mount *mp, u_int hash, int flags, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg)
{
	struct vnode *vp;
	int error;

	while (1) {
		rw_rlock(&vfs_hash_lock);
		LIST_FOREACH(vp, vfs_hash_bucket(mp, hash), v_hashlist) {
			if (vp->v_hash != hash)
				continue;
			if (vp->v_mount != mp)
				continue;
			if (fn != NULL && fn(vp, arg))
				continue;
			vhold(vp);
			rw_runlock(&vfs_hash_lock);
			error = vget(vp, flags | LK_VNHELD, td);
			if (error == ENOENT && (flags & LK_NOWAIT) == 0)
				break;
			if (error)
				return (error);
			*vpp = vp;
			return (0);
		}
		if (vp == NULL) {
			rw_runlock(&vfs_hash_lock);
			*vpp = NULL;
			return (0);
		}
	}
}

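/*
 * Like vfs_hash_get(), but only acquire a reference on the matching vnode
 * with vref() instead of locking it.
 */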
void
vfs_hash_ref(const struct mount *mp, u_int hash, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg)
{
	struct vnode *vp;

	while (1) {
		rw_rlock(&vfs_hash_lock);
		LIST_FOREACH(vp, vfs_hash_bucket(mp, hash), v_hashlist) {
			if (vp->v_hash != hash)
				continue;
			if (vp->v_mount != mp)
				continue;
			if (fn != NULL && fn(vp, arg))
				continue;
			vhold(vp);
			rw_runlock(&vfs_hash_lock);
			vref(vp);
			vdrop(vp);
			*vpp = vp;
			return;
		}
		if (vp == NULL) {
			rw_runlock(&vfs_hash_lock);
			*vpp = NULL;
			return;
		}
	}
}

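/*
 * Remove a vnode from the hash table.
 */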
void
vfs_hash_remove(struct vnode *vp)
{

	rw_wlock(&vfs_hash_lock);
	LIST_REMOVE(vp, v_hashlist);
	rw_wunlock(&vfs_hash_lock);
}

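/*
 * Insert vp into the hash table under the given hash, unless another
 * thread already inserted an equivalent vnode.  If an existing vnode is
 * found, vget() is attempted on it (and the insertion is retried if it
 * is recycled meanwhile); in that case vp is parked on the side list, so
 * that a later vfs_hash_remove() of it remains valid, and released with
 * vput(), the vget() error is returned, and on success *vpp points to
 * the existing, locked vnode.  Otherwise vp is inserted, *vpp is set to
 * NULL and 0 is returned.
 *
 * A typical consumer pattern, sketched here for illustration only; the
 * foo_alloc_vnode() helper and the use of the inode number as the hash
 * are hypothetical and not part of this file:
 *
 *	error = vfs_hash_get(mp, ino, LK_EXCLUSIVE, td, &vp, NULL, NULL);
 *	if (error != 0 || vp != NULL)
 *		return (error);
 *	error = foo_alloc_vnode(mp, ino, &vp);	(allocate and set up vnode)
 *	if (error != 0)
 *		return (error);
 *	error = vfs_hash_insert(vp, ino, LK_EXCLUSIVE, td, &vp2, NULL, NULL);
 *	if (error != 0)
 *		return (error);
 *	if (vp2 != NULL)
 *		vp = vp2;	(lost the race; use the existing vnode)
 *	*vpp = vp;
 */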
int
vfs_hash_insert(struct vnode *vp, u_int hash, int flags, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg)
{
	struct vnode *vp2;
	int error;

	*vpp = NULL;
	while (1) {
		rw_wlock(&vfs_hash_lock);
		LIST_FOREACH(vp2,
		    vfs_hash_bucket(vp->v_mount, hash), v_hashlist) {
			if (vp2->v_hash != hash)
				continue;
			if (vp2->v_mount != vp->v_mount)
				continue;
			if (fn != NULL && fn(vp2, arg))
				continue;
			vhold(vp2);
			rw_wunlock(&vfs_hash_lock);
			error = vget(vp2, flags | LK_VNHELD, td);
			if (error == ENOENT && (flags & LK_NOWAIT) == 0)
				break;
			rw_wlock(&vfs_hash_lock);
			LIST_INSERT_HEAD(&vfs_hash_side, vp, v_hashlist);
			rw_wunlock(&vfs_hash_lock);
			vput(vp);
			if (!error)
				*vpp = vp2;
			return (error);
		}
		if (vp2 == NULL)
			break;
	}
	vp->v_hash = hash;
	LIST_INSERT_HEAD(vfs_hash_bucket(vp->v_mount, hash), vp, v_hashlist);
	rw_wunlock(&vfs_hash_lock);
	return (0);
}

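/*
 * Move a vnode to the bucket for a new hash value and record that value
 * in v_hash.
 */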
void
vfs_hash_rehash(struct vnode *vp, u_int hash)
{

	rw_wlock(&vfs_hash_lock);
	LIST_REMOVE(vp, v_hashlist);
	LIST_INSERT_HEAD(vfs_hash_bucket(vp->v_mount, hash), vp, v_hashlist);
	vp->v_hash = hash;
	rw_wunlock(&vfs_hash_lock);
}

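/*
 * Resize the hash table when the maximum vnode count changes, moving all
 * vnodes from the old table into the buckets of the new one.
 */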
void
vfs_hash_changesize(int newmaxvnodes)
{
	struct vfs_hash_head *vfs_hash_newtbl, *vfs_hash_oldtbl;
	u_long vfs_hash_newmask, vfs_hash_oldmask;
	struct vnode *vp;
	int i;

	vfs_hash_newtbl = hashinit(newmaxvnodes, M_VFS_HASH,
		&vfs_hash_newmask);
	/* If same hash table size, nothing to do */
	if (vfs_hash_mask == vfs_hash_newmask) {
		free(vfs_hash_newtbl, M_VFS_HASH);
		return;
	}
	/*
	 * Move everything from the old hash table to the new table.
	 * None of the vnodes in the table can be recycled because to
	 * do so, they have to be removed from the hash table.
	 */
	rw_wlock(&vfs_hash_lock);
	vfs_hash_oldtbl = vfs_hash_tbl;
	vfs_hash_oldmask = vfs_hash_mask;
	vfs_hash_tbl = vfs_hash_newtbl;
	vfs_hash_mask = vfs_hash_newmask;
	for (i = 0; i <= vfs_hash_oldmask; i++) {
		while ((vp = LIST_FIRST(&vfs_hash_oldtbl[i])) != NULL) {
			LIST_REMOVE(vp, v_hashlist);
			LIST_INSERT_HEAD(
			    vfs_hash_bucket(vp->v_mount, vp->v_hash),
			    vp, v_hashlist);
		}
	}
	rw_wunlock(&vfs_hash_lock);
	free(vfs_hash_oldtbl, M_VFS_HASH);
}