xref: /freebsd/sys/kern/subr_kobj.c (revision d5b0e70f7e04d971691517ce1304d86a1e367e2e)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2000,2003 Doug Rabson
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31 
32 #include <sys/param.h>
33 #include <sys/kernel.h>
34 #include <sys/kobj.h>
35 #include <sys/lock.h>
36 #include <sys/malloc.h>
37 #include <sys/mutex.h>
38 #include <sys/sysctl.h>
39 #ifndef TEST
40 #include <sys/systm.h>
41 #endif
42 
43 #ifdef TEST
44 #include "usertest.h"
45 #endif
46 
/* Allocation tag for compiled ops tables and kobj_create()d objects. */
static MALLOC_DEFINE(M_KOBJ, "kobj", "Kernel object structures");

#ifdef KOBJ_STATS

/*
 * Method-cache hit/miss counters, exported read-only via sysctl.
 * NOTE(review): nothing in this file increments them — presumably the
 * lookup macros in <sys/kobj.h> do when KOBJ_STATS is defined; verify there.
 */
u_int kobj_lookup_hits;
u_int kobj_lookup_misses;

SYSCTL_UINT(_kern, OID_AUTO, kobj_hits, CTLFLAG_RD,
	   &kobj_lookup_hits, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, kobj_misses, CTLFLAG_RD,
	   &kobj_lookup_misses, 0, "");

#endif
60 
61 static struct mtx kobj_mtx;
62 static int kobj_mutex_inited;
63 static int kobj_next_id = 1;
64 
65 #define	KOBJ_LOCK()		mtx_lock(&kobj_mtx)
66 #define	KOBJ_UNLOCK()		mtx_unlock(&kobj_mtx)
67 #define	KOBJ_ASSERT(what)	mtx_assert(&kobj_mtx, what);
68 
69 SYSCTL_INT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD,
70     &kobj_next_id, 0,
71     "Number of kernel object methods registered");
72 
73 static void
74 kobj_init_mutex(void *arg)
75 {
76 	if (!kobj_mutex_inited) {
77 		mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF);
78 		kobj_mutex_inited = 1;
79 	}
80 }
81 
82 SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);
83 
/*
 * This method structure is used to initialise new caches.  Since the
 * desc pointer is NULL, it is guaranteed never to match any real
 * descriptors.
 */
static const struct kobj_method null_method = {
	0, 0,
};
92 
/*
 * Stock method implementation that always fails; installed where no
 * real implementation (or default) exists.
 */
int
kobj_error_method(void)
{
	return (ENXIO);
}
99 
/*
 * Fill in 'ops' as the compiled method cache for 'cls' and link the two
 * together.  Method descriptors seen for the first time are assigned the
 * next global id; ids start at 1 so id 0 always means "unregistered".
 *
 * No locking here: callers either hold kobj_mtx (kobj_class_compile1)
 * or run before the mutex exists (kobj_class_compile_static).
 */
static void
kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
{
	kobj_method_t *m;
	int i;

	/*
	 * Don't do anything if we are already compiled.
	 */
	if (cls->ops)
		return;

	/*
	 * First register any methods which need it.
	 */
	for (m = cls->methods; m->desc; m++) {
		if (m->desc->id == 0)
			m->desc->id = kobj_next_id++;
	}

	/*
	 * Then initialise the ops table.
	 */
	for (i = 0; i < KOBJ_CACHE_SIZE; i++)
		ops->cache[i] = &null_method;	/* never matches a real desc */
	ops->cls = cls;
	cls->ops = ops;	/* publish last: marks the class as compiled */
}
128 
/*
 * Allocate and install a compiled ops table for 'cls'.  The allocation
 * is done without kobj_mtx held (it may sleep with M_WAITOK), so after
 * taking the lock we must re-check cls->ops for a concurrent compile.
 *
 * Returns 0 on success or if the class was already compiled, ENOMEM if
 * the allocation failed (possible with M_NOWAIT in 'mflags').
 */
static int
kobj_class_compile1(kobj_class_t cls, int mflags)
{
	kobj_ops_t ops;

	KOBJ_ASSERT(MA_NOTOWNED);

	ops = malloc(sizeof(struct kobj_ops), M_KOBJ, mflags);
	if (ops == NULL)
		return (ENOMEM);

	/*
	 * We may have lost a race for kobj_class_compile here - check
	 * to make sure someone else hasn't already compiled this
	 * class.
	 */
	KOBJ_LOCK();
	if (cls->ops) {
		/* Lost the race: discard our table, the other one stands. */
		KOBJ_UNLOCK();
		free(ops, M_KOBJ);
		return (0);
	}
	kobj_class_compile_common(cls, ops);
	KOBJ_UNLOCK();
	return (0);
}
155 
156 void
157 kobj_class_compile(kobj_class_t cls)
158 {
159 	int error __diagused;
160 
161 	error = kobj_class_compile1(cls, M_WAITOK);
162 	KASSERT(error == 0, ("kobj_class_compile1 returned %d", error));
163 }
164 
165 void
166 kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops)
167 {
168 
169 	KASSERT(kobj_mutex_inited == 0,
170 	    ("%s: only supported during early cycles", __func__));
171 
172 	/*
173 	 * Increment refs to make sure that the ops table is not freed.
174 	 */
175 	cls->refs++;
176 	kobj_class_compile_common(cls, ops);
177 }
178 
179 static kobj_method_t*
180 kobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc)
181 {
182 	kobj_method_t *methods = cls->methods;
183 	kobj_method_t *ce;
184 
185 	for (ce = methods; ce && ce->desc; ce++) {
186 		if (ce->desc == desc) {
187 			return ce;
188 		}
189 	}
190 
191 	return NULL;
192 }
193 
194 static kobj_method_t*
195 kobj_lookup_method_mi(kobj_class_t cls,
196 		      kobjop_desc_t desc)
197 {
198 	kobj_method_t *ce;
199 	kobj_class_t *basep;
200 
201 	ce = kobj_lookup_method_class(cls, desc);
202 	if (ce)
203 		return ce;
204 
205 	basep = cls->baseclasses;
206 	if (basep) {
207 		for (; *basep; basep++) {
208 			ce = kobj_lookup_method_mi(*basep, desc);
209 			if (ce)
210 				return ce;
211 		}
212 	}
213 
214 	return NULL;
215 }
216 
217 kobj_method_t*
218 kobj_lookup_method(kobj_class_t cls,
219 		   kobj_method_t **cep,
220 		   kobjop_desc_t desc)
221 {
222 	kobj_method_t *ce;
223 
224 	ce = kobj_lookup_method_mi(cls, desc);
225 	if (!ce)
226 		ce = &desc->deflt;
227 	if (cep)
228 		*cep = ce;
229 	return ce;
230 }
231 
/*
 * Release the compiled ops table for 'cls' if the class has no live
 * instances.  The refs check and the detaching of cls->ops happen
 * under kobj_mtx to close the race with a concurrent kobj_create();
 * the free itself is deferred until after the lock is dropped.
 */
void
kobj_class_free(kobj_class_t cls)
{
	void* ops = NULL;

	KOBJ_ASSERT(MA_NOTOWNED);
	KOBJ_LOCK();

	/*
	 * Protect against a race between kobj_create and
	 * kobj_delete.
	 */
	if (cls->refs == 0) {
		/*
		 * For now we don't do anything to unregister any methods
		 * which are no longer used.
		 */

		/*
		 * Free memory and clean up.
		 */
		ops = cls->ops;
		cls->ops = NULL;
	}

	KOBJ_UNLOCK();

	/* ops stays NULL (and nothing is freed) if the class is still in use. */
	if (ops)
		free(ops, M_KOBJ);
}
262 
263 static void
264 kobj_init_common(kobj_t obj, kobj_class_t cls)
265 {
266 
267 	obj->ops = cls->ops;
268 	cls->refs++;
269 }
270 
/*
 * Bind 'obj' to 'cls', compiling the class first if needed.  Returns 0
 * on success, or ENOMEM if the compile's allocation failed ('mflags'
 * is passed through to it).
 *
 * A while loop, not a single re-check: cls->ops could in principle go
 * away again (kobj_class_free) during the window where we drop the lock.
 */
static int
kobj_init1(kobj_t obj, kobj_class_t cls, int mflags)
{
	int error;

	KOBJ_LOCK();
	while (cls->ops == NULL) {
		/*
		 * kobj_class_compile doesn't want the lock held
		 * because of the call to malloc - we drop the lock
		 * and re-try.
		 */
		KOBJ_UNLOCK();
		error = kobj_class_compile1(cls, mflags);
		if (error != 0)
			return (error);
		KOBJ_LOCK();
	}
	kobj_init_common(obj, cls);
	KOBJ_UNLOCK();
	return (0);
}
293 
294 kobj_t
295 kobj_create(kobj_class_t cls, struct malloc_type *mtype, int mflags)
296 {
297 	kobj_t obj;
298 
299 	obj = malloc(cls->size, mtype, mflags | M_ZERO);
300 	if (obj == NULL)
301 		return (NULL);
302 	if (kobj_init1(obj, cls, mflags) != 0) {
303 		free(obj, mtype);
304 		return (NULL);
305 	}
306 	return (obj);
307 }
308 
309 void
310 kobj_init(kobj_t obj, kobj_class_t cls)
311 {
312 	int error;
313 
314 	error = kobj_init1(obj, cls, M_NOWAIT);
315 	if (error != 0)
316 		panic("kobj_init1 failed: error %d", error);
317 }
318 
319 void
320 kobj_init_static(kobj_t obj, kobj_class_t cls)
321 {
322 
323 	KASSERT(kobj_mutex_inited == 0,
324 	    ("%s: only supported during early cycles", __func__));
325 
326 	kobj_init_common(obj, cls);
327 }
328 
/*
 * Destroy an object: drop its reference on the class, free the class's
 * compiled ops table when this was the last instance, and free the
 * object storage itself unless 'mtype' is NULL (caller-owned memory,
 * i.e. objects set up with kobj_init/kobj_init_static).
 */
void
kobj_delete(kobj_t obj, struct malloc_type *mtype)
{
	kobj_class_t cls = obj->ops->cls;
	int refs;

	/*
	 * Consider freeing the compiled method table for the class
	 * after its last instance is deleted. As an optimisation, we
	 * should defer this for a short while to avoid thrashing.
	 */
	KOBJ_ASSERT(MA_NOTOWNED);
	KOBJ_LOCK();
	/* Snapshot the post-decrement count while still under the lock. */
	cls->refs--;
	refs = cls->refs;
	KOBJ_UNLOCK();

	/* kobj_class_free() re-checks refs under the lock itself. */
	if (!refs)
		kobj_class_free(cls);

	obj->ops = NULL;
	if (mtype)
		free(obj, mtype);
}
353