xref: /freebsd/sys/kern/subr_kobj.c (revision 22cf89c938886d14f5796fc49f9f020c23ea8eaf)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2000,2003 Doug Rabson
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include <sys/cdefs.h>
30 #include <sys/param.h>
31 #include <sys/kernel.h>
32 #include <sys/kobj.h>
33 #include <sys/lock.h>
34 #include <sys/malloc.h>
35 #include <sys/mutex.h>
36 #include <sys/sysctl.h>
37 #ifndef TEST
38 #include <sys/systm.h>
39 #endif
40 
41 #ifdef TEST
42 #include "usertest.h"
43 #endif
44 
/* Malloc tag for compiled ops tables and kobj instances. */
static MALLOC_DEFINE(M_KOBJ, "kobj", "Kernel object structures");

#ifdef KOBJ_STATS

/* Method-cache hit/miss counters, exported read-only via sysctl. */
u_int kobj_lookup_hits;
u_int kobj_lookup_misses;

SYSCTL_UINT(_kern, OID_AUTO, kobj_hits, CTLFLAG_RD,
	   &kobj_lookup_hits, 0, "");
SYSCTL_UINT(_kern, OID_AUTO, kobj_misses, CTLFLAG_RD,
	   &kobj_lookup_misses, 0, "");

#endif

/* Serializes method-id allocation, class compilation and refcounts. */
static struct mtx kobj_mtx;
static int kobj_mutex_inited;	/* set once kobj_mtx is usable */
static int kobj_next_id = 1;	/* next method id; 0 means "unregistered" */

/* NOTE(review): KOBJ_ASSERT expands with a trailing ';' — call sites
 * that also write one produce a harmless empty statement. */
#define	KOBJ_LOCK()		mtx_lock(&kobj_mtx)
#define	KOBJ_UNLOCK()		mtx_unlock(&kobj_mtx)
#define	KOBJ_ASSERT(what)	mtx_assert(&kobj_mtx, what);

SYSCTL_INT(_kern, OID_AUTO, kobj_methodcount, CTLFLAG_RD,
    &kobj_next_id, 0,
    "Number of kernel object methods registered");
70 
71 static void
72 kobj_init_mutex(void *arg)
73 {
74 	if (!kobj_mutex_inited) {
75 		mtx_init(&kobj_mtx, "kobj", NULL, MTX_DEF);
76 		kobj_mutex_inited = 1;
77 	}
78 }
79 
80 SYSINIT(kobj, SI_SUB_LOCK, SI_ORDER_ANY, kobj_init_mutex, NULL);
81 
82 /*
83  * This method structure is used to initialise new caches. Since the
 * desc pointer is NULL, it is guaranteed never to match any real
85  * descriptors.
86  */
87 static const struct kobj_method null_method = {
88 	0, 0,
89 };
90 
/*
 * Stock error method: always fails with ENXIO.  Presumably installed
 * as the fallback for method slots with no implementation — confirm
 * against the DEFMETHOD machinery in kobj.h.
 */
int
kobj_error_method(void)
{

	/* Parenthesized to match the file's return-value style. */
	return (ENXIO);
}
97 
98 static void
99 kobj_class_compile_common(kobj_class_t cls, kobj_ops_t ops)
100 {
101 	kobj_method_t *m;
102 	int i;
103 
104 	/*
105 	 * Don't do anything if we are already compiled.
106 	 */
107 	if (cls->ops)
108 		return;
109 
110 	/*
111 	 * First register any methods which need it.
112 	 */
113 	for (m = cls->methods; m->desc; m++) {
114 		if (m->desc->id == 0)
115 			m->desc->id = kobj_next_id++;
116 	}
117 
118 	/*
119 	 * Then initialise the ops table.
120 	 */
121 	for (i = 0; i < KOBJ_CACHE_SIZE; i++)
122 		ops->cache[i] = &null_method;
123 	ops->cls = cls;
124 	cls->ops = ops;
125 }
126 
127 static int
128 kobj_class_compile1(kobj_class_t cls, int mflags)
129 {
130 	kobj_ops_t ops;
131 
132 	KOBJ_ASSERT(MA_NOTOWNED);
133 
134 	ops = malloc(sizeof(struct kobj_ops), M_KOBJ, mflags);
135 	if (ops == NULL)
136 		return (ENOMEM);
137 
138 	/*
139 	 * We may have lost a race for kobj_class_compile here - check
140 	 * to make sure someone else hasn't already compiled this
141 	 * class.
142 	 */
143 	KOBJ_LOCK();
144 	if (cls->ops) {
145 		KOBJ_UNLOCK();
146 		free(ops, M_KOBJ);
147 		return (0);
148 	}
149 	kobj_class_compile_common(cls, ops);
150 	KOBJ_UNLOCK();
151 	return (0);
152 }
153 
154 void
155 kobj_class_compile(kobj_class_t cls)
156 {
157 	int error __diagused;
158 
159 	error = kobj_class_compile1(cls, M_WAITOK);
160 	KASSERT(error == 0, ("kobj_class_compile1 returned %d", error));
161 }
162 
/*
 * Compile a class into a caller-supplied (statically allocated) ops
 * table.  Only legal before kobj_init_mutex has run, i.e. while the
 * system is still single-threaded, since no lock is taken here.
 */
void
kobj_class_compile_static(kobj_class_t cls, kobj_ops_t ops)
{

	KASSERT(kobj_mutex_inited == 0,
	    ("%s: only supported during early cycles", __func__));

	/*
	 * Increment refs to make sure that the ops table is not freed.
	 * (kobj_class_free only releases cls->ops when refs reaches 0,
	 * and a static table must never be passed to free().)
	 */
	cls->refs++;
	kobj_class_compile_common(cls, ops);
}
176 
177 static kobj_method_t*
178 kobj_lookup_method_class(kobj_class_t cls, kobjop_desc_t desc)
179 {
180 	kobj_method_t *methods = cls->methods;
181 	kobj_method_t *ce;
182 
183 	for (ce = methods; ce && ce->desc; ce++) {
184 		if (ce->desc == desc) {
185 			return ce;
186 		}
187 	}
188 
189 	return NULL;
190 }
191 
192 static kobj_method_t*
193 kobj_lookup_method_mi(kobj_class_t cls,
194 		      kobjop_desc_t desc)
195 {
196 	kobj_method_t *ce;
197 	kobj_class_t *basep;
198 
199 	ce = kobj_lookup_method_class(cls, desc);
200 	if (ce)
201 		return ce;
202 
203 	basep = cls->baseclasses;
204 	if (basep) {
205 		for (; *basep; basep++) {
206 			ce = kobj_lookup_method_mi(*basep, desc);
207 			if (ce)
208 				return ce;
209 		}
210 	}
211 
212 	return NULL;
213 }
214 
215 kobj_method_t*
216 kobj_lookup_method(kobj_class_t cls,
217 		   kobj_method_t **cep,
218 		   kobjop_desc_t desc)
219 {
220 	kobj_method_t *ce;
221 
222 	ce = kobj_lookup_method_mi(cls, desc);
223 	if (!ce)
224 		ce = &desc->deflt;
225 	if (cep)
226 		*cep = ce;
227 	return ce;
228 }
229 
230 void
231 kobj_class_free(kobj_class_t cls)
232 {
233 	void* ops = NULL;
234 
235 	KOBJ_ASSERT(MA_NOTOWNED);
236 	KOBJ_LOCK();
237 
238 	/*
239 	 * Protect against a race between kobj_create and
240 	 * kobj_delete.
241 	 */
242 	if (cls->refs == 0) {
243 		/*
244 		 * For now we don't do anything to unregister any methods
245 		 * which are no longer used.
246 		 */
247 
248 		/*
249 		 * Free memory and clean up.
250 		 */
251 		ops = cls->ops;
252 		cls->ops = NULL;
253 	}
254 
255 	KOBJ_UNLOCK();
256 
257 	if (ops)
258 		free(ops, M_KOBJ);
259 }
260 
261 static void
262 kobj_init_common(kobj_t obj, kobj_class_t cls)
263 {
264 
265 	obj->ops = cls->ops;
266 	cls->refs++;
267 }
268 
/*
 * Initialise an instance, compiling the class on first use.  Returns
 * 0 on success, or the error from kobj_class_compile1 (ENOMEM when
 * mflags is M_NOWAIT and the ops allocation fails).
 */
static int
kobj_init1(kobj_t obj, kobj_class_t cls, int mflags)
{
	int error;

	KOBJ_LOCK();
	/* Loop: the lock is dropped below, so ops may need re-checking. */
	while (cls->ops == NULL) {
		/*
		 * kobj_class_compile doesn't want the lock held
		 * because of the call to malloc - we drop the lock
		 * and re-try.
		 */
		KOBJ_UNLOCK();
		error = kobj_class_compile1(cls, mflags);
		if (error != 0)
			return (error);
		KOBJ_LOCK();
	}
	/* Still locked here: bind the instance and bump the refcount. */
	kobj_init_common(obj, cls);
	KOBJ_UNLOCK();
	return (0);
}
291 
292 kobj_t
293 kobj_create(kobj_class_t cls, struct malloc_type *mtype, int mflags)
294 {
295 	kobj_t obj;
296 
297 	obj = malloc(cls->size, mtype, mflags | M_ZERO);
298 	if (obj == NULL)
299 		return (NULL);
300 	if (kobj_init1(obj, cls, mflags) != 0) {
301 		free(obj, mtype);
302 		return (NULL);
303 	}
304 	return (obj);
305 }
306 
307 void
308 kobj_init(kobj_t obj, kobj_class_t cls)
309 {
310 	int error;
311 
312 	error = kobj_init1(obj, cls, M_NOWAIT);
313 	if (error != 0)
314 		panic("kobj_init1 failed: error %d", error);
315 }
316 
/*
 * Lockless instance initialisation for early boot.  Only legal while
 * the system is single-threaded (before kobj_init_mutex has run);
 * the class must already be compiled, e.g. via
 * kobj_class_compile_static.
 */
void
kobj_init_static(kobj_t obj, kobj_class_t cls)
{

	KASSERT(kobj_mutex_inited == 0,
	    ("%s: only supported during early cycles", __func__));

	kobj_init_common(obj, cls);
}
326 
327 void
328 kobj_delete(kobj_t obj, struct malloc_type *mtype)
329 {
330 	kobj_class_t cls = obj->ops->cls;
331 	int refs;
332 
333 	/*
334 	 * Consider freeing the compiled method table for the class
335 	 * after its last instance is deleted. As an optimisation, we
336 	 * should defer this for a short while to avoid thrashing.
337 	 */
338 	KOBJ_ASSERT(MA_NOTOWNED);
339 	KOBJ_LOCK();
340 	cls->refs--;
341 	refs = cls->refs;
342 	KOBJ_UNLOCK();
343 
344 	if (!refs)
345 		kobj_class_free(cls);
346 
347 	obj->ops = NULL;
348 	if (mtype)
349 		free(obj, mtype);
350 }
351