vfs_init.c: diff between revisions 273350ad0f023296911c58749d529b5f9ca51634 (old) and aec0fb7b40e4cf877bea663f2d86dd07c3524fe8 (new)
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project

--- 69 unchanged lines hidden ---

 * assume that NFS needed to be modified to support Ficus. NFS has an entry
 * (probably nfs_vnopdeop_decls) declaring all the operations NFS supports by
 * default. Ficus could add another entry (ficus_nfs_vnodeop_decl_entensions)
 * listing those new operations Ficus adds to NFS, all without modifying the
 * NFS code. (Of course, the OTW NFS protocol still needs to be munged, but
 * that is a(whole)nother story.) This is a feature.
 */
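
/*
 * Illustrative sketch (editorial, not part of this file; every "ficus_*"
 * name is hypothetical): under this scheme a layering filesystem could
 * register a second vnodeopv_desc aimed at the same operations vector,
 * adding entries such as a whiteout handler without touching the NFS
 * sources.
 */
extern vop_t **nfs_vnodeop_p;			/* NFS's existing vector */
static int ficus_nfs_whiteout(struct vop_whiteout_args *ap);	/* hypothetical */

static struct vnodeopv_entry_desc ficus_nfs_extension_entries[] = {
	{ &vop_whiteout_desc, (vop_t *) ficus_nfs_whiteout },
	{ NULL, NULL }
};
static struct vnodeopv_desc ficus_nfs_extension_opv_desc =
	{ &nfs_vnodeop_p, ficus_nfs_extension_entries };
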
Removed in the new revision:

/* Table of known vnodeop vectors (list of VFS vnode vectors) */
static const struct vnodeopv_desc **vnodeopv_descs;
static int vnodeopv_num;

/* Table of known descs (list of vnode op handlers "vop_access_desc") */
static struct vnodeop_desc **vfs_op_descs;
/* Reference counts for vfs_op_descs */
static int *vfs_op_desc_refs;
/* Number of descriptions */
static int num_op_descs;
/* Number of entries in each description */
static int vfs_opv_numops = 64;

/* Allow this number to be tuned at boot */
TUNABLE_INT("vfs.opv_numops", &vfs_opv_numops);
SYSCTL_INT(_vfs, OID_AUTO, opv_numops, CTLFLAG_RDTUN, &vfs_opv_numops,
	0, "Maximum number of operations in vop_t vector");

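/*
 * Note (editorial): because the sysctl is declared CTLFLAG_RDTUN,
 * vfs.opv_numops is read-only at run time and can only be raised from the
 * boot loader, e.g. with a line like the following in /boot/loader.conf
 * (the value 128 is only an example):
 *
 *	vfs.opv_numops="128"
 */
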
static int int_cmp(const void *a, const void *b);

static int
int_cmp(const void *a, const void *b)
{
	return (*(const int *)a - *(const int *)b);
}

/*
 * Recalculate the operations vector/description (those parts of it that
 * can be recalculated, that is).
 * Always allocate the operations vector large enough to hold vfs_opv_numops
 * entries. The vector is never freed or deallocated once it is initialized,
 * so that vnodes may safely reference it through their v_op pointer without
 * the vector changing suddenly from under them.
 */
static void
vfs_opv_recalc(void)
{
	int i, j, k;
	int *vfs_op_offsets;
	vop_t ***opv_desc_vector_p;
	vop_t **opv_desc_vector;
	struct vnodeopv_entry_desc *opve_descp;
	const struct vnodeopv_desc *opv;

	if (vfs_op_descs == NULL)
		panic("vfs_opv_recalc called with null vfs_op_descs");

	/*
	 * Allocate and initialize a temporary array to store the offsets.
	 * Sort it to put all uninitialized entries first and to make holes
	 * in the existing offset sequence detectable.
	 */
	MALLOC(vfs_op_offsets, int *,
		num_op_descs * sizeof(int), M_TEMP, M_WAITOK);
	if (vfs_op_offsets == NULL)
		panic("vfs_opv_recalc: no memory");
	for (i = 0; i < num_op_descs; i++)
		vfs_op_offsets[i] = vfs_op_descs[i]->vdesc_offset;
	qsort(vfs_op_offsets, num_op_descs, sizeof(int), int_cmp);

	/*
	 * Run through and make sure all known descs have an offset.
	 * Use vfs_op_offsets to locate holes in the offset sequence and
	 * reuse them.
	 * vop_default_desc is hardwired at offset 1, and offset 0
	 * is a panic sanity check.
	 */
	j = 1; k = 1;
	for (i = 0; i < num_op_descs; i++) {
		if (vfs_op_descs[i]->vdesc_offset != 0)
			continue;
		/*
		 * Look at two adjacent entries, vfs_op_offsets[j - 1] and
		 * vfs_op_offsets[j], and see if we can fit a new offset
		 * number in between.  If not, look at the next pair until
		 * a hole is found or the end of the vfs_op_offsets vector
		 * is reached.  j has been initialized to 1 above so that
		 * referencing the (j-1)-th element is safe, and the inner
		 * loop never executes if num_op_descs is 1.  For each new
		 * value of i the j loop picks up where the previous
		 * iteration left off.  When the last hole has been consumed,
		 * or if no hole was found, we start allocating new numbers
		 * from the biggest already-used offset + 1.
		 */
		for (; j < num_op_descs; j++) {
			if (vfs_op_offsets[j - 1] < k && vfs_op_offsets[j] > k)
				break;
			k = vfs_op_offsets[j] + 1;
		}
		vfs_op_descs[i]->vdesc_offset = k++;
	}
	FREE(vfs_op_offsets, M_TEMP);

	/* Panic if new vops will cause vector overflow */
	if (k > vfs_opv_numops)
		panic("VFS: Ran out of vop_t vector entries. %d entries required, only %d available.\n", k, vfs_opv_numops);

	/*
	 * Allocate and fill in the vectors.
	 */
	for (i = 0; i < vnodeopv_num; i++) {
		opv = vnodeopv_descs[i];
		opv_desc_vector_p = opv->opv_desc_vector_p;
		if (*opv_desc_vector_p == NULL)
			MALLOC(*opv_desc_vector_p, vop_t **,
			    vfs_opv_numops * sizeof(vop_t *), M_VNODE,
			    M_WAITOK | M_ZERO);

		/* Fill in, with slot 0 returning EOPNOTSUPP. */
		opv_desc_vector = *opv_desc_vector_p;
		opv_desc_vector[0] = (vop_t *)vop_eopnotsupp;
		for (j = 0; opv->opv_desc_ops[j].opve_op; j++) {
			opve_descp = &(opv->opv_desc_ops[j]);
			opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
			    opve_descp->opve_impl;
		}

		/* Replace unfilled routines with their default (slot 1). */
		opv_desc_vector = *(opv->opv_desc_vector_p);
		if (opv_desc_vector[1] == NULL)
			panic("vfs_opv_recalc: vector without a default.");
		for (j = 0; j < vfs_opv_numops; j++)
			if (opv_desc_vector[j] == NULL)
				opv_desc_vector[j] = opv_desc_vector[1];
	}
}

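/*
 * Worked example (editorial): suppose num_op_descs is 6 and the sorted
 * vfs_op_offsets array is { 0, 0, 1, 2, 4, 6 }, i.e. two descs are still
 * unassigned and offsets 3 and 5 are holes.  For the first unassigned desc
 * the inner loop advances k through 1, 2, 3 until it sees the pair (2, 4),
 * which brackets k = 3, so that desc gets offset 3.  For the second, the
 * loop resumes at the same j, advances k to 5, sees the pair (4, 6) bracket
 * it, and assigns offset 5.  Only once all holes are gone does k grow past
 * the largest existing offset, which is what the k > vfs_opv_numops
 * overflow check above guards against.
 */
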
/* Add a set of vnode operations (a description) to the table above. */
void
vfs_add_vnodeops(const void *data)
{
	const struct vnodeopv_desc *opv;
	const struct vnodeopv_desc **newopv;
	struct vnodeop_desc **newop;
	int *newref;
	struct vnodeop_desc *desc;
	int i, j;

	opv = (const struct vnodeopv_desc *)data;
	MALLOC(newopv, const struct vnodeopv_desc **,
	    (vnodeopv_num + 1) * sizeof(*newopv), M_VNODE, M_WAITOK);
	if (vnodeopv_descs) {
		bcopy(vnodeopv_descs, newopv, vnodeopv_num * sizeof(*newopv));
		FREE(vnodeopv_descs, M_VNODE);
	}
	newopv[vnodeopv_num] = opv;
	vnodeopv_descs = newopv;
	vnodeopv_num++;

	/* See if we have turned up a new vnode op desc */
	for (i = 0; (desc = opv->opv_desc_ops[i].opve_op); i++) {
		for (j = 0; j < num_op_descs; j++) {
			if (desc == vfs_op_descs[j]) {
				/* found it, increase reference count */
				vfs_op_desc_refs[j]++;
				break;
			}
		}
		if (j == num_op_descs) {
			/* not found, new entry */
			MALLOC(newop, struct vnodeop_desc **,
			    (num_op_descs + 1) * sizeof(*newop),
			    M_VNODE, M_WAITOK);
			/* new reference count (for unload) */
			MALLOC(newref, int *,
			    (num_op_descs + 1) * sizeof(*newref),
			    M_VNODE, M_WAITOK);
			if (vfs_op_descs) {
				bcopy(vfs_op_descs, newop,
				    num_op_descs * sizeof(*newop));
				FREE(vfs_op_descs, M_VNODE);
			}
			if (vfs_op_desc_refs) {
				bcopy(vfs_op_desc_refs, newref,
				    num_op_descs * sizeof(*newref));
				FREE(vfs_op_desc_refs, M_VNODE);
			}
			newop[num_op_descs] = desc;
			newref[num_op_descs] = 1;
			vfs_op_descs = newop;
			vfs_op_desc_refs = newref;
			num_op_descs++;
		}
	}
	vfs_opv_recalc();
}
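
/*
 * Registration example (editorial; every "foofs_*" name is hypothetical):
 * a loadable filesystem declares its operations once and hands the
 * description to the routines above at load and unload time.  Any
 * operation it does not list falls back to the default in slot 1.
 */
vop_t **foofs_vnodeop_p;
static int foofs_lookup(struct vop_lookup_args *ap);	/* hypothetical */

static struct vnodeopv_entry_desc foofs_vnodeop_entries[] = {
	{ &vop_default_desc, (vop_t *) vop_defaultop },
	{ &vop_lookup_desc, (vop_t *) foofs_lookup },
	{ NULL, NULL }
};
static struct vnodeopv_desc foofs_vnodeop_opv_desc =
	{ &foofs_vnodeop_p, foofs_vnodeop_entries };

static int
foofs_mod_sketch(int load)
{
	if (load)
		vfs_add_vnodeops(&foofs_vnodeop_opv_desc);
	else
		vfs_rm_vnodeops(&foofs_vnodeop_opv_desc);
	return (0);
}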

/* Remove a vnode type from the vnode description table above. */
void
vfs_rm_vnodeops(const void *data)
{
	const struct vnodeopv_desc *opv;
	const struct vnodeopv_desc **newopv;
	struct vnodeop_desc **newop;
	int *newref;
	vop_t **opv_desc_vector;
	struct vnodeop_desc *desc;
	int i, j, k;

	opv = (const struct vnodeopv_desc *)data;
	/* Lower ref counts on descs in the table and release if zero */
	for (i = 0; (desc = opv->opv_desc_ops[i].opve_op); i++) {
		for (j = 0; j < num_op_descs; j++) {
			if (desc == vfs_op_descs[j]) {
				/* found it, decrease reference count */
				vfs_op_desc_refs[j]--;
				break;
			}
		}
		for (j = 0; j < num_op_descs; j++) {
			if (vfs_op_desc_refs[j] > 0)
				continue;
			if (vfs_op_desc_refs[j] < 0)
				panic("vfs_rm_vnodeops: negative refcnt");
			/* Entry is going away - replace it with defaultop */
			for (k = 0; k < vnodeopv_num; k++) {
				opv_desc_vector =
				    *(vnodeopv_descs[k]->opv_desc_vector_p);
				if (opv_desc_vector != NULL)
					opv_desc_vector[desc->vdesc_offset] =
					    opv_desc_vector[1];
			}
			MALLOC(newop, struct vnodeop_desc **,
			    (num_op_descs - 1) * sizeof(*newop),
			    M_VNODE, M_WAITOK);
			/* new reference count (for unload) */
			MALLOC(newref, int *,
			    (num_op_descs - 1) * sizeof(*newref),
			    M_VNODE, M_WAITOK);
			for (k = j; k < (num_op_descs - 1); k++) {
				vfs_op_descs[k] = vfs_op_descs[k + 1];
				vfs_op_desc_refs[k] = vfs_op_desc_refs[k + 1];
			}
			bcopy(vfs_op_descs, newop,
			    (num_op_descs - 1) * sizeof(*newop));
			bcopy(vfs_op_desc_refs, newref,
			    (num_op_descs - 1) * sizeof(*newref));
			FREE(vfs_op_descs, M_VNODE);
			FREE(vfs_op_desc_refs, M_VNODE);
			vfs_op_descs = newop;
			vfs_op_desc_refs = newref;
			num_op_descs--;
		}
	}

	for (i = 0; i < vnodeopv_num; i++) {
		if (vnodeopv_descs[i] == opv) {
			for (j = i; j < (vnodeopv_num - 1); j++)
				vnodeopv_descs[j] = vnodeopv_descs[j + 1];
			break;
		}
	}
	if (i == vnodeopv_num)
		panic("vfs_rm_vnodeops: opv not found");
	opv_desc_vector = *(opv->opv_desc_vector_p);
	if (opv_desc_vector != NULL)
		FREE(opv_desc_vector, M_VNODE);
	MALLOC(newopv, const struct vnodeopv_desc **,
	    (vnodeopv_num - 1) * sizeof(*newopv), M_VNODE, M_WAITOK);
	bcopy(vnodeopv_descs, newopv, (vnodeopv_num - 1) * sizeof(*newopv));
	FREE(vnodeopv_descs, M_VNODE);
	vnodeopv_descs = newopv;
	vnodeopv_num--;

	vfs_opv_recalc();
}
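
/*
 * Worked example (editorial): if two loaded filesystems both list the same
 * desc, say vop_lookup_desc, its vfs_op_desc_refs entry is 2.  Unloading
 * one of them only drops the count to 1 and the desc stays in the table;
 * when the count reaches 0, the desc's slot in every registered vector is
 * pointed back at the default (slot 1) and the desc is cut out of both
 * parallel arrays.
 */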

Unchanged in both revisions:

/*
 * Routines having to do with the management of the vnode table.
 */

Added in the new revision:

/*
 * XXX: hack alert
 */
int
vcall(struct vnode *vp, u_int off, void *ap)
{
	struct vop_vector *vop = vp->v_op;
	vop_bypass_t **bpt;
	int rc;

	for (;;) {
		bpt = (void *)((u_char *)vop + off);
		if (vop != NULL && *bpt == NULL && vop->vop_bypass == NULL) {
			vop = vop->vop_default;
			continue;
		}
		break;
	}
	KASSERT(vop != NULL, ("No VCALL(%p...)", vp));
	if (*bpt != NULL)
		rc = (*bpt)(ap);
	else
		rc = vop->vop_bypass(ap);
	return (rc);
}

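/*
 * Illustrative sketch (editorial; "foofs" is hypothetical): under the new
 * scheme a filesystem fills in a struct vop_vector instead of a dynamic op
 * table.  Operations it leaves NULL are resolved by vcall() chasing
 * vop_default, here to the system-wide default vector.
 */
static vop_lookup_t foofs_lookup;	/* hypothetical implementation */

struct vop_vector foofs_vnodeops = {
	.vop_default =	&default_vnodeops,
	.vop_lookup =	foofs_lookup,
};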

Unchanged in both revisions:

struct vfsconf *
vfs_byname(const char *name)
{
	struct vfsconf *vfsp;

	TAILQ_FOREACH(vfsp, &vfsconf, vfc_list)
		if (!strcmp(name, vfsp->vfc_name))
			return (vfsp);

--- 170 unchanged lines hidden ---
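
/*
 * Usage sketch (editorial) for vfs_byname() above: callers map a filesystem
 * name from userland to its vfsconf entry, e.g. in the mount path:
 *
 *	if ((vfsp = vfs_byname("nullfs")) == NULL)
 *		return (ENODEV);
 */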