/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000,2003 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#ifndef TEST
#include <sys/systm.h>
#endif

#ifdef TEST
#include "usertest.h"
#endif

static MALLOC_DEFINE(M_KOBJ, "kobj", "Kernel object structures");

#ifdef KOBJ_STATS

u_int kobj_lookup_hits;
u_int kobj_lookup_misses;

SYSCTL_UINT(_kern, OID_AUTO, kobj_hits, CTLFLAG_RD,
    &kobj_lookup_hits, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, kobj_misses, CTLFLAG_RD,
    &kobj_lookup_misses, 0, "");

#endif

static struct mtx kobj_mtx;
static int kobj_mutex_inited;
static int kobj_next_id = 1;

#define	KOBJ_LOCK()		mtx_lock(&kobj_mtx)
#define	KOBJ_UNLOCK()		mtx_unlock(&kobj_mtx)
#define	KOBJ_ASSERT(what)	mtx_assert(&kobj_mtx, what)

SYSCTL_INT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD,
    &kobj_next_id, 0, "");

static void
kobj_init_mutex(void *arg)
{
	if (!kobj_mutex_inited) {
		mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF);
		kobj_mutex_inited = 1;
	}
}

SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);

/*
 * This method structure is used to initialise new caches. Since the
 * desc pointer is NULL, it is guaranteed never to match any real
 * descriptors.
 */
static const struct kobj_method null_method = {
	0, 0,
};

int
kobj_error_method(void)
{

	return ENXIO;
}

static void
kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
{
	kobj_method_t *m;
	int i;

	/*
	 * Don't do anything if we are already compiled.
	 */
	if (cls->ops)
		return;

	/*
	 * First register any methods which need it.
	 */
	for (i = 0, m = cls->methods; m->desc; i++, m++) {
		if (m->desc->id == 0)
			m->desc->id = kobj_next_id++;
	}

	/*
	 * Then initialise the ops table.
	 */
	for (i = 0; i < KOBJ_CACHE_SIZE; i++)
		ops->cache[i] = &null_method;
	ops->cls = cls;
	cls->ops = ops;
}
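/*
 * Illustrative sketch only: roughly what a dispatch stub generated by
 * the kobj interface tooling is assumed to look like.  The names
 * example_doit and example_doit_desc are hypothetical.  It shows why
 * the cache initialisation above uses null_method: a NULL desc can
 * never compare equal to a real descriptor, so the first call through
 * any cache slot misses, falls back to kobj_lookup_method() and
 * repopulates the slot.
 */
#if 0
extern struct kobjop_desc example_doit_desc;	/* assumed generated */

static int
example_doit(kobj_t obj, int arg)
{
	kobj_method_t **cep, *ce;

	/* Hash the method's globally-unique id into the per-class cache. */
	cep = &obj->ops->cache[example_doit_desc.id & (KOBJ_CACHE_SIZE - 1)];
	ce = *cep;
	if (ce->desc != &example_doit_desc)
		ce = kobj_lookup_method(obj->ops->cls, cep,
		    &example_doit_desc);
	return (((int (*)(kobj_t, int))ce->func)(obj, arg));
}
#endif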
void
kobj_class_compile(kobj_class_t cls)
{
	kobj_ops_t ops;

	KOBJ_ASSERT(MA_NOTOWNED);

	/*
	 * Allocate space for the compiled ops table.
	 */
	ops = malloc(sizeof(struct kobj_ops), M_KOBJ, M_NOWAIT);
	if (!ops)
		panic("%s: out of memory", __func__);

	KOBJ_LOCK();

	/*
	 * We may have lost a race for kobj_class_compile here - check
	 * to make sure someone else hasn't already compiled this
	 * class.
	 */
	if (cls->ops) {
		KOBJ_UNLOCK();
		free(ops, M_KOBJ);
		return;
	}

	kobj_class_compile_common(cls, ops);
	KOBJ_UNLOCK();
}

void
kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops)
{

	KASSERT(kobj_mutex_inited == 0,
	    ("%s: only supported during early cycles", __func__));

	/*
	 * Increment refs to make sure that the ops table is not freed.
	 */
	cls->refs++;
	kobj_class_compile_common(cls, ops);
}

static kobj_method_t *
kobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc)
{
	kobj_method_t *methods = cls->methods;
	kobj_method_t *ce;

	for (ce = methods; ce && ce->desc; ce++) {
		if (ce->desc == desc) {
			return ce;
		}
	}

	return NULL;
}

static kobj_method_t *
kobj_lookup_method_mi(kobj_class_t cls,
		      kobjop_desc_t desc)
{
	kobj_method_t *ce;
	kobj_class_t *basep;

	ce = kobj_lookup_method_class(cls, desc);
	if (ce)
		return ce;

	basep = cls->baseclasses;
	if (basep) {
		for (; *basep; basep++) {
			ce = kobj_lookup_method_mi(*basep, desc);
			if (ce)
				return ce;
		}
	}

	return NULL;
}

kobj_method_t *
kobj_lookup_method(kobj_class_t cls,
		   kobj_method_t **cep,
		   kobjop_desc_t desc)
{
	kobj_method_t *ce;

	ce = kobj_lookup_method_mi(cls, desc);
	if (!ce)
		ce = &desc->deflt;
	if (cep)
		*cep = ce;
	return ce;
}

void
kobj_class_free(kobj_class_t cls)
{
	void *ops = NULL;

	KOBJ_ASSERT(MA_NOTOWNED);
	KOBJ_LOCK();

	/*
	 * Protect against a race between kobj_create and
	 * kobj_delete.
	 */
	if (cls->refs == 0) {
		/*
		 * For now we don't do anything to unregister any methods
		 * which are no longer used.
		 */

		/*
		 * Free memory and clean up.
		 */
		ops = cls->ops;
		cls->ops = NULL;
	}

	KOBJ_UNLOCK();

	if (ops)
		free(ops, M_KOBJ);
}

kobj_t
kobj_create(kobj_class_t cls,
	    struct malloc_type *mtype,
	    int mflags)
{
	kobj_t obj;

	/*
	 * Allocate and initialise the new object.
	 */
	obj = malloc(cls->size, mtype, mflags | M_ZERO);
	if (!obj)
		return NULL;
	kobj_init(obj, cls);

	return obj;
}

static void
kobj_init_common(kobj_t obj, kobj_class_t cls)
{

	obj->ops = cls->ops;
	cls->refs++;
}

void
kobj_init(kobj_t obj, kobj_class_t cls)
{
	KOBJ_ASSERT(MA_NOTOWNED);
  retry:
	KOBJ_LOCK();

	/*
	 * Consider compiling the class' method table.
	 */
	if (!cls->ops) {
		/*
		 * kobj_class_compile doesn't want the lock held
		 * because of the call to malloc - we drop the lock
		 * and re-try.
		 */
		KOBJ_UNLOCK();
		kobj_class_compile(cls);
		goto retry;
	}

	kobj_init_common(obj, cls);

	KOBJ_UNLOCK();
}
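/*
 * Usage sketch: a hypothetical "foo" class, not part of this file.
 * KOBJMETHOD, KOBJMETHOD_END, KOBJ_FIELDS and DEFINE_CLASS come from
 * <sys/kobj.h>; foo_if.h and the FOO_DOIT() wrapper are assumed to be
 * generated from a foo_if.m interface description.  kobj_create()
 * allocates and zeroes the object, and kobj_init() compiles the class
 * on first use, exactly as in the functions above.
 */
#if 0
#include "foo_if.h"

static int foo_doit_impl(kobj_t obj, int arg);

static kobj_method_t foo_methods[] = {
	KOBJMETHOD(foo_doit, foo_doit_impl),
	KOBJMETHOD_END
};

struct foo_softc {
	KOBJ_FIELDS;		/* must be the first member */
	int fs_state;
};

DEFINE_CLASS(foo, foo_methods, sizeof(struct foo_softc));

static void
foo_example(void)
{
	kobj_t obj;

	obj = kobj_create(&foo_class, M_KOBJ, M_WAITOK);
	FOO_DOIT(obj, 42);	/* dispatches through the ops cache */
	kobj_delete(obj, M_KOBJ);
}
#endif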
void
kobj_init_static(kobj_t obj, kobj_class_t cls)
{

	KASSERT(kobj_mutex_inited == 0,
	    ("%s: only supported during early cycles", __func__));

	kobj_init_common(obj, cls);
}

void
kobj_delete(kobj_t obj, struct malloc_type *mtype)
{
	kobj_class_t cls = obj->ops->cls;
	int refs;

	/*
	 * Consider freeing the compiled method table for the class
	 * after its last instance is deleted. As an optimisation, we
	 * should defer this for a short while to avoid thrashing.
	 */
	KOBJ_ASSERT(MA_NOTOWNED);
	KOBJ_LOCK();
	cls->refs--;
	refs = cls->refs;
	KOBJ_UNLOCK();

	if (!refs)
		kobj_class_free(cls);

	obj->ops = NULL;
	if (mtype)
		free(obj, mtype);
}
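/*
 * Early-boot sketch: the "bar" names are hypothetical, and the class
 * would be declared with DEFINE_CLASS() as in the "foo" sketch above.
 * Before the SI_SUB_LOCK SYSINIT runs kobj_init_mutex(), neither
 * kobj_mtx nor malloc() is usable, so the caller supplies the ops
 * storage itself and uses the _static variants, which skip the lock.
 * The extra reference taken by kobj_class_compile_static() ensures
 * kobj_class_free() never tries to free() the static ops table.
 */
#if 0
static struct kobj_ops bar_ops;		/* caller-provided, never freed */

static struct bar_softc {
	KOBJ_FIELDS;			/* must be the first member */
	int bs_state;
} bar0;

static void
bar_early_init(void)
{
	kobj_class_compile_static(&bar_class, &bar_ops);
	kobj_init_static((kobj_t)&bar0, &bar_class);
}
#endif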