/*-
 * Copyright (c) 2000,2003 Doug Rabson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#ifndef TEST
#include <sys/systm.h>
#endif

#ifdef TEST
#include "usertest.h"
#endif

static MALLOC_DEFINE(M_KOBJ, "kobj", "Kernel object structures");

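/*
 * When the kernel is built with KOBJ_STATS, the method dispatch
 * macro (KOBJOPLOOKUP in <sys/kobj.h>) counts method-cache hits and
 * misses in the counters below.
 */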
#ifdef KOBJ_STATS

u_int kobj_lookup_hits;
u_int kobj_lookup_misses;

SYSCTL_UINT(_kern, OID_AUTO, kobj_hits, CTLFLAG_RD,
	   &kobj_lookup_hits, 0, "Number of kobj method cache hits");
SYSCTL_UINT(_kern, OID_AUTO, kobj_misses, CTLFLAG_RD,
	   &kobj_lookup_misses, 0, "Number of kobj method cache misses");

#endif

static struct mtx kobj_mtx;
static int kobj_mutex_inited;
static int kobj_next_id = 1;

#define	KOBJ_LOCK()		mtx_lock(&kobj_mtx)
#define	KOBJ_UNLOCK()		mtx_unlock(&kobj_mtx)
#define	KOBJ_ASSERT(what)	mtx_assert(&kobj_mtx, what)

SYSCTL_INT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD,
	   &kobj_next_id, 0, "Number of registered kobj methods");

static void
kobj_init_mutex(void *arg)
{
	if (!kobj_mutex_inited) {
		mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF);
		kobj_mutex_inited = 1;
	}
}

SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);

/*
 * This method structure is used to initialise new caches. Since the
 * desc pointer is NULL, it is guaranteed never to match any real
 * descriptors.
 */
static const struct kobj_method null_method = {
	0, 0,
};

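/*
 * Generic "unimplemented method" handler.  Interface definitions can
 * use this as the default implementation for methods a class does
 * not provide; it simply reports the operation as unsupported.
 */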
int
kobj_error_method(void)
{

	return ENXIO;
}

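/*
 * Compile a class into the given ops table: assign a globally unique
 * id to each method descriptor the class uses, then point every
 * cache slot at null_method so the first lookup of any method misses
 * the cache and falls through to the slow path.
 */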
static void
kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
{
	kobj_method_t *m;
	int i;

	/*
	 * Don't do anything if we are already compiled.
	 */
	if (cls->ops)
		return;

	/*
	 * First register any methods which need it.
	 */
	for (i = 0, m = cls->methods; m->desc; i++, m++) {
		if (m->desc->id == 0)
			m->desc->id = kobj_next_id++;
	}

	/*
	 * Then initialise the ops table.
	 */
	for (i = 0; i < KOBJ_CACHE_SIZE; i++)
		ops->cache[i] = &null_method;
	ops->cls = cls;
	cls->ops = ops;
}

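/*
 * Compile a class at run time, allocating its ops table from M_KOBJ.
 * Racing compilations are harmless: the loser frees its freshly
 * allocated table and keeps the winner's.
 */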
void
kobj_class_compile(kobj_class_t cls)
{
	kobj_ops_t ops;

	KOBJ_ASSERT(MA_NOTOWNED);

	/*
	 * Allocate space for the compiled ops table.
	 */
	ops = malloc(sizeof(struct kobj_ops), M_KOBJ, M_NOWAIT);
	if (!ops)
		panic("%s: out of memory", __func__);

	KOBJ_LOCK();

	/*
	 * We may have lost a race for kobj_class_compile here - check
	 * to make sure someone else hasn't already compiled this
	 * class.
	 */
	if (cls->ops) {
		KOBJ_UNLOCK();
		free(ops, M_KOBJ);
		return;
	}

	kobj_class_compile_common(cls, ops);
	KOBJ_UNLOCK();
}

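/*
 * Compile a class before malloc(9) and mutex(9) are available, using
 * a caller-supplied (typically statically allocated) ops table.
 */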
void
kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops)
{

	KASSERT(kobj_mutex_inited == 0,
	    ("%s: only supported during early cycles", __func__));

	/*
	 * Increment refs to make sure that the ops table is not freed.
	 */
	cls->refs++;
	kobj_class_compile_common(cls, ops);
}

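/*
 * Scan a single class's own method table for an entry whose
 * descriptor matches; returns NULL when the class does not implement
 * the method itself.
 */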
static kobj_method_t*
kobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc)
{
	kobj_method_t *methods = cls->methods;
	kobj_method_t *ce;

	for (ce = methods; ce && ce->desc; ce++) {
		if (ce->desc == desc) {
			return ce;
		}
	}

	return NULL;
}

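/*
 * Search a class and then each of its base classes, depth first, so
 * that a method defined by the class itself takes priority over any
 * inherited implementation.
 */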
static kobj_method_t*
kobj_lookup_method_mi(kobj_class_t cls,
		      kobjop_desc_t desc)
{
	kobj_method_t *ce;
	kobj_class_t *basep;

	ce = kobj_lookup_method_class(cls, desc);
	if (ce)
		return ce;

	basep = cls->baseclasses;
	if (basep) {
		for (; *basep; basep++) {
			ce = kobj_lookup_method_mi(*basep, desc);
			if (ce)
				return ce;
		}
	}

	return NULL;
}

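/*
 * Resolve a method for a class, falling back to the descriptor's
 * default implementation when no class in the hierarchy provides
 * one.  If 'cep' points at a cache slot, the result is stored there
 * so the next fast-path lookup of this method can hit the cache.
 */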
kobj_method_t*
kobj_lookup_method(kobj_class_t cls,
		   kobj_method_t **cep,
		   kobjop_desc_t desc)
{
	kobj_method_t *ce;

	ce = kobj_lookup_method_mi(cls, desc);
	if (!ce)
		ce = &desc->deflt;
	if (cep)
		*cep = ce;
	return ce;
}

void
kobj_class_free(kobj_class_t cls)
{
	void* ops = NULL;

	KOBJ_ASSERT(MA_NOTOWNED);
	KOBJ_LOCK();

	/*
	 * Protect against a race between kobj_create and
	 * kobj_delete.
	 */
	if (cls->refs == 0) {
		/*
		 * For now we don't do anything to unregister any methods
		 * which are no longer used.
		 */

		/*
		 * Free memory and clean up.
		 */
		ops = cls->ops;
		cls->ops = NULL;
	}

	KOBJ_UNLOCK();

	if (ops)
		free(ops, M_KOBJ);
}

kobj_t
kobj_create(kobj_class_t cls,
	    struct malloc_type *mtype,
	    int mflags)
{
	kobj_t obj;

	/*
	 * Allocate and initialise the new object.
	 */
	obj = malloc(cls->size, mtype, mflags | M_ZERO);
	if (!obj)
		return NULL;
	kobj_init(obj, cls);

	return obj;
}
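
/*
 * Typical usage, as a sketch ('foo' is a hypothetical class declared
 * elsewhere with DEFINE_CLASS(foo, foo_methods, sizeof(struct foo))):
 *
 *	kobj_t obj;
 *
 *	obj = kobj_create(&foo_class, M_KOBJ, M_NOWAIT);
 *	if (obj == NULL)
 *		return (ENOMEM);
 *	...
 *	kobj_delete(obj, M_KOBJ);
 */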

static void
kobj_init_common(kobj_t obj, kobj_class_t cls)
{

	obj->ops = cls->ops;
	cls->refs++;
}

void
kobj_init(kobj_t obj, kobj_class_t cls)
{
	KOBJ_ASSERT(MA_NOTOWNED);
  retry:
	KOBJ_LOCK();

	/*
	 * Consider compiling the class' method table.
	 */
	if (!cls->ops) {
		/*
		 * kobj_class_compile doesn't want the lock held
		 * because of the call to malloc - we drop the lock
		 * and re-try.
		 */
		KOBJ_UNLOCK();
		kobj_class_compile(cls);
		goto retry;
	}

	kobj_init_common(obj, cls);

	KOBJ_UNLOCK();
}

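/*
 * Initialise an object before the kobj mutex exists; the class is
 * expected to have been compiled earlier with
 * kobj_class_compile_static().
 */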
void
kobj_init_static(kobj_t obj, kobj_class_t cls)
{

	KASSERT(kobj_mutex_inited == 0,
	    ("%s: only supported during early cycles", __func__));

	kobj_init_common(obj, cls);
}

void
kobj_delete(kobj_t obj, struct malloc_type *mtype)
{
	kobj_class_t cls = obj->ops->cls;
	int refs;

	/*
	 * Consider freeing the compiled method table for the class
	 * after its last instance is deleted. As an optimisation, we
	 * should defer this for a short while to avoid thrashing.
	 */
	KOBJ_ASSERT(MA_NOTOWNED);
	KOBJ_LOCK();
	cls->refs--;
	refs = cls->refs;
	KOBJ_UNLOCK();

	if (!refs)
		kobj_class_free(cls);

	obj->ops = NULL;
	if (mtype)
		free(obj, mtype);
}
341