/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000,2003 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#ifndef TEST
#include <sys/systm.h>
#endif

#ifdef TEST
#include "usertest.h"
#endif

static MALLOC_DEFINE(M_KOBJ, "kobj", "Kernel object structures");

#ifdef KOBJ_STATS

u_int kobj_lookup_hits;
u_int kobj_lookup_misses;

SYSCTL_UINT(_kern, OID_AUTO, kobj_hits, CTLFLAG_RD,
    &kobj_lookup_hits, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, kobj_misses, CTLFLAG_RD,
    &kobj_lookup_misses, 0, "");

#endif

static struct mtx kobj_mtx;
static int kobj_mutex_inited;
static int kobj_next_id = 1;

#define KOBJ_LOCK()             mtx_lock(&kobj_mtx)
#define KOBJ_UNLOCK()           mtx_unlock(&kobj_mtx)
#define KOBJ_ASSERT(what)       mtx_assert(&kobj_mtx, what);

SYSCTL_INT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD,
    &kobj_next_id, 0,
    "Number of kernel object methods registered");

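/*
 * Initialise the lock that protects the method descriptor ids, the
 * class ops pointers and the class reference counts.  This runs from
 * the SYSINIT below at SI_SUB_LOCK; kobj_mutex_inited also lets the
 * *_static variants assert that they are only used before the lock
 * exists.
 */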
static void
kobj_init_mutex(void *arg)
{
        if (!kobj_mutex_inited) {
                mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF);
                kobj_mutex_inited = 1;
        }
}

SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);

/*
 * This method structure is used to initialise new caches. Since the
 * desc pointer is NULL, it is guaranteed never to match any real
 * descriptors.
 */
static const struct kobj_method null_method = {
        0, 0,
};

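/*
 * Generic error method.  It is typically used as the default for a
 * method with no explicit DEFAULT implementation, so that calling an
 * unimplemented method fails with ENXIO rather than jumping through a
 * null pointer.
 */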
int
kobj_error_method(void)
{

        return ENXIO;
}

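/*
 * Compile a class: assign a unique id to every method descriptor that
 * does not have one yet, then point each cache slot of the supplied
 * ops table at null_method and link the table to the class.  Callers
 * either hold kobj_mtx or run before it is initialised.
 */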
static void
kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
{
        kobj_method_t *m;
        int i;

        /*
         * Don't do anything if we are already compiled.
         */
        if (cls->ops)
                return;

        /*
         * First register any methods which need it.
         */
        for (m = cls->methods; m->desc; m++) {
                if (m->desc->id == 0)
                        m->desc->id = kobj_next_id++;
        }

        /*
         * Then initialise the ops table.
         */
        for (i = 0; i < KOBJ_CACHE_SIZE; i++)
                ops->cache[i] = &null_method;
        ops->cls = cls;
        cls->ops = ops;
}

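/*
 * Allocate an ops table with the given malloc flags and compile the
 * class into it.  If another thread compiled the class while the
 * allocation was in progress, the freshly allocated table is freed
 * and the existing one is kept.
 */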
static int
kobj_class_compile1(kobj_class_t cls, int mflags)
{
        kobj_ops_t ops;

        KOBJ_ASSERT(MA_NOTOWNED);

        ops = malloc(sizeof(struct kobj_ops), M_KOBJ, mflags);
        if (ops == NULL)
                return (ENOMEM);

        /*
         * We may have lost a race for kobj_class_compile here - check
         * to make sure someone else hasn't already compiled this
         * class.
         */
        KOBJ_LOCK();
        if (cls->ops) {
                KOBJ_UNLOCK();
                free(ops, M_KOBJ);
                return (0);
        }
        kobj_class_compile_common(cls, ops);
        KOBJ_UNLOCK();
        return (0);
}

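/*
 * Compile a class, sleeping for memory if necessary.  With M_WAITOK
 * the allocation cannot fail, so this variant cannot fail either.
 */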
void
kobj_class_compile(kobj_class_t cls)
{
        int error __diagused;

        error = kobj_class_compile1(cls, M_WAITOK);
        KASSERT(error == 0, ("kobj_class_compile1 returned %d", error));
}

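/*
 * Compile a class into caller-supplied static storage.  Only usable
 * during early boot, before kobj_mtx has been initialised.
 */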
void
kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops)
{

        KASSERT(kobj_mutex_inited == 0,
            ("%s: only supported during early cycles", __func__));

        /*
         * Increment refs to make sure that the ops table is not freed.
         */
        cls->refs++;
        kobj_class_compile_common(cls, ops);
}

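/*
 * Look up a method in a single class's own method table by comparing
 * descriptor pointers.  Returns NULL if this class does not implement
 * the method itself.
 */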
static kobj_method_t*
kobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc)
{
        kobj_method_t *methods = cls->methods;
        kobj_method_t *ce;

        for (ce = methods; ce && ce->desc; ce++) {
                if (ce->desc == desc) {
                        return ce;
                }
        }

        return NULL;
}

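/*
 * Look up a method in a class, recursing depth-first into its base
 * classes if the class does not implement the method itself.
 */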
static kobj_method_t*
kobj_lookup_method_mi(kobj_class_t cls,
    kobjop_desc_t desc)
{
        kobj_method_t *ce;
        kobj_class_t *basep;

        ce = kobj_lookup_method_class(cls, desc);
        if (ce)
                return ce;

        basep = cls->baseclasses;
        if (basep) {
                for (; *basep; basep++) {
                        ce = kobj_lookup_method_mi(*basep, desc);
                        if (ce)
                                return ce;
                }
        }

        return NULL;
}

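/*
 * Resolve a method for a class, falling back to the descriptor's
 * default method if no class in the hierarchy implements it.  If cep
 * is non-NULL the result is also stored there, which is how callers
 * (typically the KOBJOPLOOKUP() macro) refill their method cache.
 */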
kobj_method_t*
kobj_lookup_method(kobj_class_t cls,
    kobj_method_t **cep,
    kobjop_desc_t desc)
{
        kobj_method_t *ce;

        ce = kobj_lookup_method_mi(cls, desc);
        if (!ce)
                ce = &desc->deflt;
        if (cep)
                *cep = ce;
        return ce;
}

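/*
 * Release a class's compiled ops table once it has no instances left.
 * The table is only detached if the reference count is still zero
 * while the lock is held, which protects against a racing
 * kobj_create().
 */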
void
kobj_class_free(kobj_class_t cls)
{
        void* ops = NULL;

        KOBJ_ASSERT(MA_NOTOWNED);
        KOBJ_LOCK();

        /*
         * Protect against a race between kobj_create and
         * kobj_delete.
         */
        if (cls->refs == 0) {
                /*
                 * For now we don't do anything to unregister any methods
                 * which are no longer used.
                 */

                /*
                 * Free memory and clean up.
                 */
                ops = cls->ops;
                cls->ops = NULL;
        }

        KOBJ_UNLOCK();

        if (ops)
                free(ops, M_KOBJ);
}

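/*
 * Attach an object to its class's compiled ops table and take a
 * reference on the class.  Called with kobj_mtx held, or before the
 * mutex exists in the static case.
 */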
static void
kobj_init_common(kobj_t obj, kobj_class_t cls)
{

        obj->ops = cls->ops;
        cls->refs++;
}

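/*
 * Initialise an object, compiling the class on demand.  The lock is
 * dropped around kobj_class_compile1() because it allocates memory;
 * the loop re-checks cls->ops after reacquiring the lock.
 */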
static int
kobj_init1(kobj_t obj, kobj_class_t cls, int mflags)
{
        int error;

        KOBJ_LOCK();
        while (cls->ops == NULL) {
                /*
                 * kobj_class_compile doesn't want the lock held
                 * because of the call to malloc - we drop the lock
                 * and re-try.
                 */
                KOBJ_UNLOCK();
                error = kobj_class_compile1(cls, mflags);
                if (error != 0)
                        return (error);
                KOBJ_LOCK();
        }
        kobj_init_common(obj, cls);
        KOBJ_UNLOCK();
        return (0);
}

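/*
 * Allocate and initialise a new object of the given class, drawing
 * the storage from mtype.  Returns NULL if either the allocation or
 * the class compilation fails.
 *
 * A minimal usage sketch (the class name below is purely
 * illustrative, not something defined in this file):
 *
 *      kobj_t obj;
 *
 *      obj = kobj_create(&foo_class, M_DEVBUF, M_NOWAIT);
 *      if (obj == NULL)
 *              return (ENOMEM);
 *      ...
 *      kobj_delete(obj, M_DEVBUF);
 */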
kobj_t
kobj_create(kobj_class_t cls, struct malloc_type *mtype, int mflags)
{
        kobj_t obj;

        obj = malloc(cls->size, mtype, mflags | M_ZERO);
        if (obj == NULL)
                return (NULL);
        if (kobj_init1(obj, cls, mflags) != 0) {
                free(obj, mtype);
                return (NULL);
        }
        return (obj);
}

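/*
 * Initialise a caller-allocated object.  The class is compiled with
 * M_NOWAIT if needed; since that can fail, a failure here panics.
 */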
void
kobj_init(kobj_t obj, kobj_class_t cls)
{
        int error;

        error = kobj_init1(obj, cls, M_NOWAIT);
        if (error != 0)
                panic("kobj_init1 failed: error %d", error);
}

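/*
 * Initialise a statically allocated object during early boot, before
 * kobj_mtx exists.  The class must already be compiled, e.g. via
 * kobj_class_compile_static().
 */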
void
kobj_init_static(kobj_t obj, kobj_class_t cls)
{

        KASSERT(kobj_mutex_inited == 0,
            ("%s: only supported during early cycles", __func__));

        kobj_init_common(obj, cls);
}

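/*
 * Destroy an object: drop the class reference, free the class's ops
 * table if this was the last instance, and free the object itself if
 * a malloc type was supplied.
 */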
void
kobj_delete(kobj_t obj, struct malloc_type *mtype)
{
        kobj_class_t cls = obj->ops->cls;
        int refs;

        /*
         * Consider freeing the compiled method table for the class
         * after its last instance is deleted. As an optimisation, we
         * should defer this for a short while to avoid thrashing.
         */
        KOBJ_ASSERT(MA_NOTOWNED);
        KOBJ_LOCK();
        cls->refs--;
        refs = cls->refs;
        KOBJ_UNLOCK();

        if (!refs)
                kobj_class_free(cls);

        obj->ops = NULL;
        if (mtype)
                free(obj, mtype);
}