xref: /freebsd/sys/kern/vfs_hash.c (revision 2008043f386721d58158e37e0d7e50df8095942d)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2005 Poul-Henning Kamp
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  *
28  */
29 
30 #include <sys/cdefs.h>
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/malloc.h>
35 #include <sys/mount.h>
36 #include <sys/rwlock.h>
37 #include <sys/vnode.h>
38 
static MALLOC_DEFINE(M_VFS_HASH, "vfs_hash", "VFS hash table");

/* Bucket array; allocated from desiredvnodes in vfs_hashinit(). */
static LIST_HEAD(vfs_hash_head, vnode)	*vfs_hash_tbl;
/* Parking list for vnodes that lose an insertion race (see vfs_hash_insert()). */
static LIST_HEAD(,vnode)		vfs_hash_side;
/* Bucket-selection mask (table size - 1), set by hashinit(). */
static u_long				vfs_hash_mask;
/* Global lock protecting the table, the side list, and v_hashlist linkage. */
static struct rwlock __exclusive_cache_line vfs_hash_lock;
45 
46 static void
47 vfs_hashinit(void *dummy __unused)
48 {
49 
50 	vfs_hash_tbl = hashinit(desiredvnodes, M_VFS_HASH, &vfs_hash_mask);
51 	rw_init(&vfs_hash_lock, "vfs hash");
52 	LIST_INIT(&vfs_hash_side);
53 }
54 
55 /* Must be SI_ORDER_SECOND so desiredvnodes is available */
56 SYSINIT(vfs_hash, SI_SUB_VFS, SI_ORDER_SECOND, vfs_hashinit, NULL);
57 
58 u_int
59 vfs_hash_index(struct vnode *vp)
60 {
61 
62 	return (vp->v_hash + vp->v_mount->mnt_hashseed);
63 }
64 
65 static struct vfs_hash_head *
66 vfs_hash_bucket(const struct mount *mp, u_int hash)
67 {
68 
69 	return (&vfs_hash_tbl[(hash + mp->mnt_hashseed) & vfs_hash_mask]);
70 }
71 
/*
 * Look up a vnode by (mount, hash).
 *
 * A candidate matches when its v_hash and v_mount agree and the optional
 * comparator 'fn' (called with 'arg') returns 0.  On a match the vnode is
 * acquired via vget_finish() with 'flags' and returned in *vpp with a
 * return value of 0.  If the bucket holds no match, *vpp is set to NULL
 * and 0 is returned.  A non-zero return is the vget_finish() error.
 * 'td' is not used by this function.
 */
int
vfs_hash_get(const struct mount *mp, u_int hash, int flags, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg)
{
	struct vnode *vp;
	enum vgetstate vs;
	int error;

	while (1) {
		rw_rlock(&vfs_hash_lock);
		LIST_FOREACH(vp, vfs_hash_bucket(mp, hash), v_hashlist) {
			if (vp->v_hash != hash)
				continue;
			if (vp->v_mount != mp)
				continue;
			/* Comparator returns non-zero for "not this one". */
			if (fn != NULL && fn(vp, arg))
				continue;
			/*
			 * Prepare the reference while the hash lock still
			 * covers the vnode, then drop the lock before
			 * finishing acquisition, which may sleep.
			 */
			vs = vget_prep(vp);
			rw_runlock(&vfs_hash_lock);
			error = vget_finish(vp, flags, vs);
			/*
			 * ENOENT without LK_NOWAIT: the vnode presumably got
			 * doomed while we slept -- restart the bucket scan.
			 */
			if (error == ENOENT && (flags & LK_NOWAIT) == 0)
				break;
			if (error != 0)
				return (error);
			/*
			 * Revalidate after the potential sleep in
			 * vget_finish(): hash or comparator identity may
			 * have changed.  NOTE(review): v_mount is not
			 * rechecked here.
			 */
			if (vp->v_hash != hash ||
			    (fn != NULL && fn(vp, arg))) {
				vput(vp);
				/* Restart the bucket walk. */
				break;
			}
			*vpp = vp;
			return (0);
		}
		/* Walked the whole bucket without a match. */
		if (vp == NULL) {
			rw_runlock(&vfs_hash_lock);
			*vpp = NULL;
			return (0);
		}
	}
}
112 
/*
 * Look up a vnode by (mount, hash) and return it in *vpp holding a
 * usecount reference (vref()) but without acquiring the vnode lock.
 * *vpp is set to NULL when no match is found.  Matching rules are the
 * same as vfs_hash_get().  'td' is not used by this function.
 */
void
vfs_hash_ref(const struct mount *mp, u_int hash, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg)
{
	struct vnode *vp;

	while (1) {
		rw_rlock(&vfs_hash_lock);
		LIST_FOREACH(vp, vfs_hash_bucket(mp, hash), v_hashlist) {
			if (vp->v_hash != hash)
				continue;
			if (vp->v_mount != mp)
				continue;
			if (fn != NULL && fn(vp, arg))
				continue;
			/*
			 * Pin the vnode with vhold() before dropping the
			 * hash lock, then convert the hold into a usecount
			 * reference with vref() and release the hold.
			 */
			vhold(vp);
			rw_runlock(&vfs_hash_lock);
			vref(vp);
			vdrop(vp);
			*vpp = vp;
			return;
		}
		/* Walked the whole bucket without a match. */
		if (vp == NULL) {
			rw_runlock(&vfs_hash_lock);
			*vpp = NULL;
			return;
		}
	}
}
142 
/*
 * Unlink a vnode from whichever v_hashlist it currently sits on
 * (a regular bucket or the side list).
 */
void
vfs_hash_remove(struct vnode *vp)
{

	rw_wlock(&vfs_hash_lock);
	LIST_REMOVE(vp, v_hashlist);
	rw_wunlock(&vfs_hash_lock);
}
151 
/*
 * Insert 'vp' into the hash table unless an equivalent vnode (same hash
 * and mount, comparator 'fn' returns 0) is already present.
 *
 * If an existing vnode is found, 'vp' loses the race: it is moved to
 * the side list, vgone()'d and vput() -- the caller is presumed to pass
 * it in locked and referenced -- and on success the pre-existing vnode
 * is returned in *vpp acquired per 'flags'.  The return value is then
 * the vget_finish() error (0 on success).  If no duplicate exists, 'vp'
 * is linked into its bucket, *vpp stays NULL, and 0 is returned.
 * 'td' is not used by this function.
 */
int
vfs_hash_insert(struct vnode *vp, u_int hash, int flags, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg)
{
	struct vnode *vp2;
	enum vgetstate vs;
	int error;

	*vpp = NULL;
	while (1) {
		rw_wlock(&vfs_hash_lock);
		LIST_FOREACH(vp2,
		    vfs_hash_bucket(vp->v_mount, hash), v_hashlist) {
			if (vp2->v_hash != hash)
				continue;
			if (vp2->v_mount != vp->v_mount)
				continue;
			if (fn != NULL && fn(vp2, arg))
				continue;
			/* Drop the lock before vget_finish(); it may sleep. */
			vs = vget_prep(vp2);
			rw_wunlock(&vfs_hash_lock);
			error = vget_finish(vp2, flags, vs);
			/* The duplicate went away while we slept; rescan. */
			if (error == ENOENT && (flags & LK_NOWAIT) == 0)
				break;
			/*
			 * We lost the race (or vget failed).  Park vp on the
			 * side list -- presumably so its v_hashlist linkage
			 * stays valid for removal during vgone() -- then
			 * destroy it and hand back the winner (if any).
			 */
			rw_wlock(&vfs_hash_lock);
			LIST_INSERT_HEAD(&vfs_hash_side, vp, v_hashlist);
			rw_wunlock(&vfs_hash_lock);
			vgone(vp);
			vput(vp);
			if (!error)
				*vpp = vp2;
			return (error);
		}
		/* No duplicate: leave with the write lock still held. */
		if (vp2 == NULL)
			break;
	}
	vp->v_hash = hash;
	LIST_INSERT_HEAD(vfs_hash_bucket(vp->v_mount, hash), vp, v_hashlist);
	rw_wunlock(&vfs_hash_lock);
	return (0);
}
193 
194 void
195 vfs_hash_rehash(struct vnode *vp, u_int hash)
196 {
197 	ASSERT_VOP_ELOCKED(vp, "rehash requires excl lock");
198 
199 	rw_wlock(&vfs_hash_lock);
200 	LIST_REMOVE(vp, v_hashlist);
201 	LIST_INSERT_HEAD(vfs_hash_bucket(vp->v_mount, hash), vp, v_hashlist);
202 	vp->v_hash = hash;
203 	rw_wunlock(&vfs_hash_lock);
204 }
205 
206 void
207 vfs_hash_changesize(u_long newmaxvnodes)
208 {
209 	struct vfs_hash_head *vfs_hash_newtbl, *vfs_hash_oldtbl;
210 	u_long vfs_hash_newmask, vfs_hash_oldmask;
211 	struct vnode *vp;
212 	int i;
213 
214 	vfs_hash_newtbl = hashinit(newmaxvnodes, M_VFS_HASH,
215 		&vfs_hash_newmask);
216 	/* If same hash table size, nothing to do */
217 	if (vfs_hash_mask == vfs_hash_newmask) {
218 		free(vfs_hash_newtbl, M_VFS_HASH);
219 		return;
220 	}
221 	/*
222 	 * Move everything from the old hash table to the new table.
223 	 * None of the vnodes in the table can be recycled because to
224 	 * do so, they have to be removed from the hash table.
225 	 */
226 	rw_wlock(&vfs_hash_lock);
227 	vfs_hash_oldtbl = vfs_hash_tbl;
228 	vfs_hash_oldmask = vfs_hash_mask;
229 	vfs_hash_tbl = vfs_hash_newtbl;
230 	vfs_hash_mask = vfs_hash_newmask;
231 	for (i = 0; i <= vfs_hash_oldmask; i++) {
232 		while ((vp = LIST_FIRST(&vfs_hash_oldtbl[i])) != NULL) {
233 			LIST_REMOVE(vp, v_hashlist);
234 			LIST_INSERT_HEAD(
235 			    vfs_hash_bucket(vp->v_mount, vp->v_hash),
236 			    vp, v_hashlist);
237 		}
238 	}
239 	rw_wunlock(&vfs_hash_lock);
240 	free(vfs_hash_oldtbl, M_VFS_HASH);
241 }
242