/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed
 * to Berkeley by John Heidemann of the UCLA Ficus project.
 *
 * Source: * @(#)i405_init.c 2.10 92/04/27 UCLA Ficus project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_init.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD$
 */


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <vm/vm_zone.h>


MALLOC_DEFINE(M_VNODE, "vnodes", "Dynamically allocated vnodes");

/*
 * Zone for namei
 */
struct vm_zone *namei_zone;

/*
 * vfs_init() will set maxvfsconf
 * to the highest defined type number.
 */
int maxvfsconf;
struct vfsconf *vfsconf;

/*
 * vfs_init.c
 *
 * Allocate and fill in operations vectors.
 *
 * An undocumented feature of this approach to defining operations is that
 * there can be multiple entries in vfs_opv_descs for the same operations
 * vector.  This allows third parties to extend the set of operations
 * supported by another layer in a binary compatible way.  For example,
 * assume that NFS needed to be modified to support Ficus.  NFS has an entry
 * (probably nfs_vnodeop_decls) declaring all the operations NFS supports by
 * default.  Ficus could add another entry (ficus_nfs_vnodeop_decl_extensions)
 * listing those new operations Ficus adds to NFS, all without modifying the
 * NFS code.  (Of course, the OTW NFS protocol still needs to be munged, but
 * that is a(whole)nother story.)  This is a feature.
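 *
 * Concretely (a sketch only; the Ficus-side names here are invented for
 * illustration), such an extension entry would just be another
 * vnodeopv_desc aimed at the same operations vector:
 *
 *	static struct vnodeopv_entry_desc ficus_nfs_vnodeop_entries[] = {
 *		{ &vop_ficus_newop_desc, (vop_t *) nfs_ficus_newop },
 *		{ NULL, NULL }
 *	};
 *	static struct vnodeopv_desc ficus_nfs_vnodeop_opv_desc =
 *		{ &nfs_vnodeop_p, ficus_nfs_vnodeop_entries };
 *
 * Handing that desc to vfs_add_vnodeops() below splices the extra
 * operations into the existing nfs_vnodeop_p vector the next time the
 * vectors are recalculated.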
 */

/* Table of known vnodeop vectors (list of VFS vnode vectors) */
static const struct vnodeopv_desc **vnodeopv_descs;
static int vnodeopv_num;

/* Table of known descs (list of vnode op handlers "vop_access_desc") */
static struct vnodeop_desc **vfs_op_descs;
static int *vfs_op_desc_refs;		/* reference counts */
static int num_op_descs;
static int vfs_opv_numops;

static void
vfs_opv_recalc(void)
{
	int i, j;
	vop_t ***opv_desc_vector_p;
	vop_t **opv_desc_vector;
	struct vnodeopv_entry_desc *opve_descp;
	const struct vnodeopv_desc *opv;

	if (vfs_op_descs == NULL)
		panic("vfs_opv_recalc called with null vfs_op_descs");

	/*
	 * Run through and make sure all known descs have an offset
	 *
	 * vop_default_desc is hardwired at offset 1, and offset 0
	 * is a panic sanity check.
	 */
	vfs_opv_numops = 0;
	for (i = 0; i < num_op_descs; i++)
		if (vfs_opv_numops < (vfs_op_descs[i]->vdesc_offset + 1))
			vfs_opv_numops = vfs_op_descs[i]->vdesc_offset + 1;
	for (i = 0; i < num_op_descs; i++)
		if (vfs_op_descs[i]->vdesc_offset == 0)
			vfs_op_descs[i]->vdesc_offset = vfs_opv_numops++;
	/*
	 * Allocate and fill in the vectors
	 */
	for (i = 0; i < vnodeopv_num; i++) {
		opv = vnodeopv_descs[i];
		opv_desc_vector_p = opv->opv_desc_vector_p;
		if (*opv_desc_vector_p)
			FREE(*opv_desc_vector_p, M_VNODE);
		MALLOC(*opv_desc_vector_p, vop_t **,
		    vfs_opv_numops * sizeof(vop_t *), M_VNODE, M_WAITOK);
		if (*opv_desc_vector_p == NULL)
			panic("no memory for vop_t ** vector");
		bzero(*opv_desc_vector_p, vfs_opv_numops * sizeof(vop_t *));

		/* Fill in, with slot 0 being panic */
		opv_desc_vector = *opv_desc_vector_p;
		opv_desc_vector[0] = (vop_t *)vop_panic;
		for (j = 0; opv->opv_desc_ops[j].opve_op; j++) {
			opve_descp = &(opv->opv_desc_ops[j]);
			opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
			    opve_descp->opve_impl;
		}

		/* Replace unfilled routines with their default (slot 1). */
		opv_desc_vector = *(opv->opv_desc_vector_p);
		if (opv_desc_vector[1] == NULL)
			panic("vfs_opv_recalc: vector without a default.");
		for (j = 0; j < vfs_opv_numops; j++)
			if (opv_desc_vector[j] == NULL)
				opv_desc_vector[j] = opv_desc_vector[1];
	}
}

void
vfs_add_vnodeops(const void *data)
{
	const struct vnodeopv_desc *opv;
	const struct vnodeopv_desc **newopv;
	struct vnodeop_desc **newop;
	int *newref;
	vop_t **opv_desc_vector;
	struct vnodeop_desc *desc;
	int i, j;

	opv = (const struct vnodeopv_desc *)data;
	MALLOC(newopv, const struct vnodeopv_desc **,
	    (vnodeopv_num + 1) * sizeof(*newopv), M_VNODE, M_WAITOK);
	if (newopv == NULL)
		panic("vfs_add_vnodeops: no memory");
	if (vnodeopv_descs) {
		bcopy(vnodeopv_descs, newopv, vnodeopv_num * sizeof(*newopv));
		FREE(vnodeopv_descs, M_VNODE);
	}
	newopv[vnodeopv_num] = opv;
	vnodeopv_descs = newopv;
	vnodeopv_num++;

	/* See if we have turned up a new vnode op desc */
	opv_desc_vector = *(opv->opv_desc_vector_p);
	for (i = 0; (desc = opv->opv_desc_ops[i].opve_op); i++) {
		for (j = 0; j < num_op_descs; j++) {
			if (desc == vfs_op_descs[j]) {
				/* found it, increase reference count */
				vfs_op_desc_refs[j]++;
				break;
			}
		}
		if (j == num_op_descs) {
			/* not found, new entry */
			MALLOC(newop, struct vnodeop_desc **,
			    (num_op_descs + 1) * sizeof(*newop),
			    M_VNODE, M_WAITOK);
			if (newop == NULL)
				panic("vfs_add_vnodeops: no memory for desc");
			/* new reference count (for unload) */
			MALLOC(newref, int *,
			    (num_op_descs + 1) * sizeof(*newref),
			    M_VNODE, M_WAITOK);
			if (newref == NULL)
				panic("vfs_add_vnodeops: no memory for refs");
			if (vfs_op_descs) {
				bcopy(vfs_op_descs, newop,
				    num_op_descs * sizeof(*newop));
				FREE(vfs_op_descs, M_VNODE);
			}
			if (vfs_op_desc_refs) {
				bcopy(vfs_op_desc_refs, newref,
				    num_op_descs * sizeof(*newref));
				FREE(vfs_op_desc_refs, M_VNODE);
			}
			newop[num_op_descs] = desc;
			newref[num_op_descs] = 1;
			vfs_op_descs = newop;
			vfs_op_desc_refs = newref;
			num_op_descs++;
		}
	}
	vfs_opv_recalc();
}

void
vfs_rm_vnodeops(const void *data)
{
	const struct vnodeopv_desc *opv;
	const struct vnodeopv_desc **newopv;
	struct vnodeop_desc **newop;
	int *newref;
	vop_t **opv_desc_vector;
	struct vnodeop_desc *desc;
	int i, j, k;

	opv = (const struct vnodeopv_desc *)data;
	/* Lower ref counts on descs in the table and release if zero */
	opv_desc_vector = *(opv->opv_desc_vector_p);
	for (i = 0; (desc = opv->opv_desc_ops[i].opve_op); i++) {
		for (j = 0; j < num_op_descs; j++) {
			if (desc == vfs_op_descs[j]) {
				/* found it, decrease reference count */
				vfs_op_desc_refs[j]--;
				break;
			}
		}
		for (j = 0; j < num_op_descs; j++) {
			if (vfs_op_desc_refs[j] > 0)
				continue;
			if (vfs_op_desc_refs[j] < 0)
				panic("vfs_remove_vnodeops: negative refcnt");
			MALLOC(newop, struct vnodeop_desc **,
			    (num_op_descs - 1) * sizeof(*newop),
			    M_VNODE, M_WAITOK);
			if (newop == NULL)
				panic("vfs_remove_vnodeops: no memory for desc");
			/* new reference count (for unload) */
			MALLOC(newref, int *,
			    (num_op_descs - 1) * sizeof(*newref),
			    M_VNODE, M_WAITOK);
			if (newref == NULL)
				panic("vfs_remove_vnodeops: no memory for refs");
			for (k = j; k < (num_op_descs - 1); k++) {
				vfs_op_descs[k] = vfs_op_descs[k + 1];
				vfs_op_desc_refs[k] = vfs_op_desc_refs[k + 1];
			}
			bcopy(vfs_op_descs, newop,
			    (num_op_descs - 1) * sizeof(*newop));
			bcopy(vfs_op_desc_refs, newref,
			    (num_op_descs - 1) * sizeof(*newref));
			FREE(vfs_op_descs, M_VNODE);
			FREE(vfs_op_desc_refs, M_VNODE);
			vfs_op_descs = newop;
			vfs_op_desc_refs = newref;
			num_op_descs--;
		}
	}

	for (i = 0; i < vnodeopv_num; i++) {
		if (vnodeopv_descs[i] == opv) {
			for (j = i; j < (vnodeopv_num - 1); j++)
				vnodeopv_descs[j] = vnodeopv_descs[j + 1];
			break;
		}
	}
	if (i == vnodeopv_num)
		panic("vfs_remove_vnodeops: opv not found");
	MALLOC(newopv, const struct vnodeopv_desc **,
	    (vnodeopv_num - 1) * sizeof(*newopv), M_VNODE, M_WAITOK);
	if (newopv == NULL)
		panic("vfs_remove_vnodeops: no memory");
	bcopy(vnodeopv_descs, newopv, (vnodeopv_num - 1) * sizeof(*newopv));
	FREE(vnodeopv_descs, M_VNODE);
	vnodeopv_descs = newopv;
	vnodeopv_num--;

	vfs_opv_recalc();
}

/*
 * Routines having to do with the management of the vnode table.
 */
struct vattr va_null;

/*
 * Initialize the vnode structures and initialize each file system type.
 */
/* ARGSUSED*/
static void
vfsinit(void *dummy)
{

	namei_zone = zinit("NAMEI", MAXPATHLEN, 0, 0, 2);

	/*
	 * Initialize the vnode table
	 */
	vntblinit();
	/*
	 * Initialize the vnode name cache
	 */
	nchinit();
	/*
	 * Initialize each file system type.
	 * Vfs type numbers must be distinct from VFS_GENERIC (and VFS_VFSCONF).
	 */
	vattr_null(&va_null);
	maxvfsconf = VFS_GENERIC + 1;
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vfsinit, NULL)

int
vfs_register(struct vfsconf *vfc)
{
	struct sysctl_oid *oidp;
	struct vfsconf *vfsp;

	vfsp = NULL;
	if (vfsconf)
		for (vfsp = vfsconf; vfsp->vfc_next; vfsp = vfsp->vfc_next)
			if (strcmp(vfc->vfc_name, vfsp->vfc_name) == 0)
				return EEXIST;

	vfc->vfc_typenum = maxvfsconf++;
	if (vfsp)
		vfsp->vfc_next = vfc;
	else
		vfsconf = vfc;
	vfc->vfc_next = NULL;

	/*
	 * If this filesystem has a sysctl node under vfs
	 * (i.e. vfs.xxfs), then change the oid number of that node to
	 * match the filesystem's type number.  This allows user code
	 * which uses the type number to read sysctl variables defined
	 * by the filesystem to continue working.  Since the oids are
	 * in a sorted list, we need to make sure the order is
	 * preserved by re-registering the oid after modifying its
	 * number.
	 */
	for (oidp = SLIST_FIRST(&sysctl__vfs_children); oidp;
	     oidp = SLIST_NEXT(oidp, oid_link))
		if (strcmp(oidp->oid_name, vfc->vfc_name) == 0) {
			sysctl_unregister_oid(oidp);
			oidp->oid_number = vfc->vfc_typenum;
			sysctl_register_oid(oidp);
		}

	/*
	 * Call init function for this VFS...
	 */
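	/*
	 * vfc_vfsops->vfs_init is whatever hook the filesystem supplied in
	 * its struct vfsops (e.g. ffs_init() for FFS).
	 */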
365 */ 366 (*(vfc->vfc_vfsops->vfs_init))(vfc); 367 368 return 0; 369 } 370 371 372 int 373 vfs_unregister(struct vfsconf *vfc) 374 { 375 struct vfsconf *vfsp, *prev_vfsp; 376 int error, i, maxtypenum; 377 378 i = vfc->vfc_typenum; 379 380 prev_vfsp = NULL; 381 for (vfsp = vfsconf; vfsp; 382 prev_vfsp = vfsp, vfsp = vfsp->vfc_next) { 383 if (!strcmp(vfc->vfc_name, vfsp->vfc_name)) 384 break; 385 } 386 if (vfsp == NULL) 387 return EINVAL; 388 if (vfsp->vfc_refcount) 389 return EBUSY; 390 if (vfc->vfc_vfsops->vfs_uninit != NULL) { 391 error = (*vfc->vfc_vfsops->vfs_uninit)(vfsp); 392 if (error) 393 return (error); 394 } 395 if (prev_vfsp) 396 prev_vfsp->vfc_next = vfsp->vfc_next; 397 else 398 vfsconf = vfsp->vfc_next; 399 maxtypenum = VFS_GENERIC; 400 for (vfsp = vfsconf; vfsp != NULL; vfsp = vfsp->vfc_next) 401 if (maxtypenum < vfsp->vfc_typenum) 402 maxtypenum = vfsp->vfc_typenum; 403 maxvfsconf = maxtypenum + 1; 404 return 0; 405 } 406 407 int 408 vfs_modevent(module_t mod, int type, void *data) 409 { 410 struct vfsconf *vfc; 411 int error = 0; 412 413 vfc = (struct vfsconf *)data; 414 415 switch (type) { 416 case MOD_LOAD: 417 if (vfc) 418 error = vfs_register(vfc); 419 break; 420 421 case MOD_UNLOAD: 422 if (vfc) 423 error = vfs_unregister(vfc); 424 break; 425 default: /* including MOD_SHUTDOWN */ 426 break; 427 } 428 return (error); 429 } 430