/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/rwlock.h>
#include <sys/vnode.h>

static MALLOC_DEFINE(M_VFS_HASH, "vfs_hash", "VFS hash table");

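/*
 * A single global table maps (mount, hash) pairs, where "hash" is a
 * per-filesystem identifier such as an inode number, to in-core vnodes.
 * vfs_hash_side collects vnodes that lost an insertion race and are
 * being destroyed; vfs_hash_lock protects both the table and the side
 * list.
 */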
40 static LIST_HEAD(vfs_hash_head, vnode) *vfs_hash_tbl;
41 static LIST_HEAD(,vnode) vfs_hash_side;
42 static u_long vfs_hash_mask;
43 static struct rwlock __exclusive_cache_line vfs_hash_lock;
44
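/*
 * Allocate the hash table and set up its lock.  The table is sized from
 * desiredvnodes, so initialization must run after that tunable has been
 * set; see the SYSINIT ordering below.
 */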
45 static void
vfs_hashinit(void * dummy __unused)46 vfs_hashinit(void *dummy __unused)
47 {
48
49 vfs_hash_tbl = hashinit(desiredvnodes, M_VFS_HASH, &vfs_hash_mask);
50 rw_init(&vfs_hash_lock, "vfs hash");
51 LIST_INIT(&vfs_hash_side);
52 }
53
54 /* Must be SI_ORDER_SECOND so desiredvnodes is available */
55 SYSINIT(vfs_hash, SI_SUB_VFS, SI_ORDER_SECOND, vfs_hashinit, NULL);
56
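/*
 * Return the vnode's hash salted with its mount's seed.  This is the
 * same value vfs_hash_bucket() masks to pick a bucket for the vnode.
 */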
u_int
vfs_hash_index(struct vnode *vp)
{

	return (vp->v_hash + vp->v_mount->mnt_hashseed);
}

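/*
 * Map (mount, hash) to a bucket.  Salting with the per-mount seed
 * spreads vnodes from different mounts, whose identifier ranges often
 * overlap, across the whole table.
 */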
static struct vfs_hash_head *
vfs_hash_bucket(const struct mount *mp, u_int hash)
{

	return (&vfs_hash_tbl[(hash + mp->mnt_hashseed) & vfs_hash_mask]);
}

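/*
 * Look up the vnode identified by (mp, hash).  On success, *vpp holds
 * the vnode, referenced and locked as requested by "flags"; if no match
 * is found, *vpp is set to NULL and 0 is returned.  An optional "fn"
 * rejects candidates the caller does not want.  The bucket walk is
 * restarted if the vnode is doomed while we sleep on its lock, or if
 * its identity changes before the lock is acquired.
 *
 * A sketch of a typical consumer, here a hypothetical filesystem
 * looking up an inode number before allocating a new vnode:
 *
 *	error = vfs_hash_get(mp, ino, LK_EXCLUSIVE, td, vpp, NULL, NULL);
 *	if (error != 0 || *vpp != NULL)
 *		return (error);
 *	... allocate and set up a new vnode, then vfs_hash_insert() ...
 */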
int
vfs_hash_get(const struct mount *mp, u_int hash, int flags, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg)
{
	struct vnode *vp;
	enum vgetstate vs;
	int error;

	while (1) {
		rw_rlock(&vfs_hash_lock);
		LIST_FOREACH(vp, vfs_hash_bucket(mp, hash), v_hashlist) {
			if (vp->v_hash != hash)
				continue;
			if (vp->v_mount != mp)
				continue;
			if (fn != NULL && fn(vp, arg))
				continue;
			vs = vget_prep(vp);
			rw_runlock(&vfs_hash_lock);
			error = vget_finish(vp, flags, vs);
			if (error == ENOENT && (flags & LK_NOWAIT) == 0)
				break;
			if (error != 0)
				return (error);
			if (vp->v_hash != hash ||
			    (fn != NULL && fn(vp, arg))) {
				vput(vp);
				/* Restart the bucket walk. */
				break;
			}
			*vpp = vp;
			return (0);
		}
		if (vp == NULL) {
			rw_runlock(&vfs_hash_lock);
			*vpp = NULL;
			return (0);
		}
	}
}

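/*
 * Like vfs_hash_get(), but only acquire a use reference: the vnode is
 * returned unlocked in *vpp.  vhold() keeps the vnode alive while the
 * hash lock is dropped, after which vref()/vdrop() convert the hold
 * into a regular reference.
 */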
void
vfs_hash_ref(const struct mount *mp, u_int hash, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg)
{
	struct vnode *vp;

	while (1) {
		rw_rlock(&vfs_hash_lock);
		LIST_FOREACH(vp, vfs_hash_bucket(mp, hash), v_hashlist) {
			if (vp->v_hash != hash)
				continue;
			if (vp->v_mount != mp)
				continue;
			if (fn != NULL && fn(vp, arg))
				continue;
			vhold(vp);
			rw_runlock(&vfs_hash_lock);
			vref(vp);
			vdrop(vp);
			*vpp = vp;
			return;
		}
		if (vp == NULL) {
			rw_runlock(&vfs_hash_lock);
			*vpp = NULL;
			return;
		}
	}
}

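/*
 * Remove the vnode from the hash table, e.g. when it is being recycled.
 */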
void
vfs_hash_remove(struct vnode *vp)
{

	rw_wlock(&vfs_hash_lock);
	LIST_REMOVE(vp, v_hashlist);
	rw_wunlock(&vfs_hash_lock);
}

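/*
 * Insert a new, locked vnode into the hash table, resolving races with
 * concurrent inserts of the same identity.  If an equivalent vnode is
 * already hashed, the new vnode is moved to the side list and destroyed
 * with vgone(), and the existing vnode is returned in *vpp (locked as
 * requested, with 0 returned) or, if it could not be acquired, *vpp is
 * left NULL and the vget() error is returned.  If no duplicate exists,
 * the new vnode is hashed and 0 is returned with *vpp set to NULL.
 */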
int
vfs_hash_insert(struct vnode *vp, u_int hash, int flags, struct thread *td,
    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg)
{
	struct vnode *vp2;
	enum vgetstate vs;
	int error;

	*vpp = NULL;
	while (1) {
		rw_wlock(&vfs_hash_lock);
		LIST_FOREACH(vp2,
		    vfs_hash_bucket(vp->v_mount, hash), v_hashlist) {
			if (vp2->v_hash != hash)
				continue;
			if (vp2->v_mount != vp->v_mount)
				continue;
			if (fn != NULL && fn(vp2, arg))
				continue;
			vs = vget_prep(vp2);
			rw_wunlock(&vfs_hash_lock);
			error = vget_finish(vp2, flags, vs);
			if (error == ENOENT && (flags & LK_NOWAIT) == 0)
				break;
			rw_wlock(&vfs_hash_lock);
			LIST_INSERT_HEAD(&vfs_hash_side, vp, v_hashlist);
			rw_wunlock(&vfs_hash_lock);
			vgone(vp);
			vput(vp);
			if (error == 0)
				*vpp = vp2;
			return (error);
		}
		if (vp2 == NULL)
			break;
	}
	vp->v_hash = hash;
	LIST_INSERT_HEAD(vfs_hash_bucket(vp->v_mount, hash), vp, v_hashlist);
	rw_wunlock(&vfs_hash_lock);
	return (0);
}

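/*
 * Move an exclusively locked vnode to the bucket matching a new hash
 * value, updating v_hash accordingly.
 */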
void
vfs_hash_rehash(struct vnode *vp, u_int hash)
{
	ASSERT_VOP_ELOCKED(vp, "rehash requires excl lock");

	rw_wlock(&vfs_hash_lock);
	LIST_REMOVE(vp, v_hashlist);
	LIST_INSERT_HEAD(vfs_hash_bucket(vp->v_mount, hash), vp, v_hashlist);
	vp->v_hash = hash;
	rw_wunlock(&vfs_hash_lock);
}

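/*
 * Resize the table when the vnode limit changes, rehashing every vnode
 * from the old table into the new one.  If the computed table size is
 * unchanged, the new table is simply freed.
 */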
void
vfs_hash_changesize(u_long newmaxvnodes)
{
	struct vfs_hash_head *vfs_hash_newtbl, *vfs_hash_oldtbl;
	u_long vfs_hash_newmask, vfs_hash_oldmask;
	struct vnode *vp;
	u_long i;

	vfs_hash_newtbl = hashinit(newmaxvnodes, M_VFS_HASH,
	    &vfs_hash_newmask);
	/* If the hash table size is unchanged, there is nothing to do. */
	if (vfs_hash_mask == vfs_hash_newmask) {
		free(vfs_hash_newtbl, M_VFS_HASH);
		return;
	}
	/*
	 * Move everything from the old hash table to the new table.
	 * None of the vnodes in the table can be recycled because to
	 * do so, they have to be removed from the hash table.
	 */
	rw_wlock(&vfs_hash_lock);
	vfs_hash_oldtbl = vfs_hash_tbl;
	vfs_hash_oldmask = vfs_hash_mask;
	vfs_hash_tbl = vfs_hash_newtbl;
	vfs_hash_mask = vfs_hash_newmask;
	for (i = 0; i <= vfs_hash_oldmask; i++) {
		while ((vp = LIST_FIRST(&vfs_hash_oldtbl[i])) != NULL) {
			LIST_REMOVE(vp, v_hashlist);
			LIST_INSERT_HEAD(
			    vfs_hash_bucket(vp->v_mount, vp->v_hash),
			    vp, v_hashlist);
		}
	}
	rw_wunlock(&vfs_hash_lock);
	free(vfs_hash_oldtbl, M_VFS_HASH);
}