/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/kmem.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/brand.h>
#include <sys/machbrand.h>
#include <sys/modctl.h>
#include <sys/rwlock.h>
#include <sys/zone.h>

#define	SUPPORTED_BRAND_VERSION BRAND_VER_1

#if defined(__sparcv9)
struct brand_mach_ops native_mach_ops = {
		NULL, NULL
};
#else /* !__sparcv9 */
struct brand_mach_ops native_mach_ops = {
		NULL, NULL, NULL, NULL, NULL, NULL
};
#endif /* !__sparcv9 */

brand_t native_brand = {
		BRAND_VER_1,
		"native",
		NULL,
		&native_mach_ops
};
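
/*
 * The initializers above are, in order: the brand version (b_version),
 * the brand name (b_name), the brand operations vector (b_ops; the
 * native brand supplies none), and the machine-specific operations
 * vector defined just above.
 */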

/*
 * Used to maintain a list of all the brands currently loaded into the
 * kernel.
 */
struct brand_list {
	int			bl_refcnt;
	struct brand_list	*bl_next;
	brand_t			*bl_brand;
};

static struct brand_list *brand_list = NULL;
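
/*
 * Illustrative sketch (hypothetical brand name): after a brand "foo" has
 * been registered and two zones of that brand instantiated, the list
 * would look like:
 *
 *	brand_list --> { bl_refcnt = 2, bl_next = NULL, bl_brand = &foo_brand }
 *
 * The statically defined native_brand is never placed on this list; only
 * brands registered via brand_register() appear here.
 */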

/*
 * Used to enable brand platform-specific interposition code.  The symbol
 * is declared weak so that, on platforms which provide no such code, it
 * resolves to NULL and the call in brand_init() is simply skipped.
 */
#pragma	weak	brand_plat_interposition_init
extern void brand_plat_interposition_init(void);

/*
 * This lock protects the integrity of the brand list.
 */
static kmutex_t brand_list_lock;

/*
 * Initialize the brand framework: give the platform a chance to set up
 * its brand syscall interposition hooks, create the brand list lock, and
 * mark the initial process as running the native brand.
 */
void
brand_init()
{
	if (brand_plat_interposition_init != NULL)
		brand_plat_interposition_init();
	mutex_init(&brand_list_lock, NULL, MUTEX_DEFAULT, NULL);
	p0.p_brand = &native_brand;
}

/*
 * Register a brand with the kernel.  This is normally invoked when the
 * kernel module implementing the brand is loaded.
 */
int
brand_register(brand_t *brand)
{
	struct brand_list *list, *scan;

	if (brand == NULL)
		return (EINVAL);

	if (is_system_labeled()) {
		cmn_err(CE_WARN,
		    "Branded zones are not allowed on labeled systems.");
		return (EINVAL);
	}

	if (brand->b_version != SUPPORTED_BRAND_VERSION) {
		if (brand->b_version < SUPPORTED_BRAND_VERSION) {
			cmn_err(CE_WARN,
			    "brand '%s' was built to run on older versions "
			    "of Solaris.",
			    brand->b_name);
		} else {
			cmn_err(CE_WARN,
			    "brand '%s' was built to run on a newer version "
			    "of Solaris.",
			    brand->b_name);
		}
		return (EINVAL);
	}

	/* Sanity checks */
	if (brand->b_name == NULL || brand->b_ops == NULL ||
	    brand->b_ops->b_brandsys == NULL) {
		cmn_err(CE_WARN, "Malformed brand");
		return (EINVAL);
	}

	list = kmem_alloc(sizeof (struct brand_list), KM_SLEEP);

	/* Add the brand to the list of loaded brands. */
	mutex_enter(&brand_list_lock);

	/*
	 * Check to be sure we haven't already registered this brand.
	 */
	for (scan = brand_list; scan != NULL; scan = scan->bl_next) {
		if (strcmp(brand->b_name, scan->bl_brand->b_name) == 0) {
			cmn_err(CE_WARN,
			    "Invalid attempt to load a second instance of "
			    "brand %s", brand->b_name);
			mutex_exit(&brand_list_lock);
			kmem_free(list, sizeof (struct brand_list));
			return (EINVAL);
		}
	}

	list->bl_brand = brand;
	list->bl_refcnt = 0;
	list->bl_next = brand_list;
	brand_list = list;
	mutex_exit(&brand_list_lock);

	return (0);
}
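
/*
 * Minimal registration sketch (hypothetical names, shown only for
 * illustration; a real brand module would normally register through the
 * module-loading framework rather than by hand):
 *
 *	static struct brand_ops foo_brops = {
 *		...			(must supply at least b_brandsys)
 *	};
 *
 *	static brand_t foo_brand = {
 *		BRAND_VER_1,		(b_version)
 *		"foo",			(b_name)
 *		&foo_brops,		(b_ops)
 *		&foo_mach_ops		(machine-specific ops)
 *	};
 *
 *	if (brand_register(&foo_brand) != 0)
 *		... registration failed ...
 */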

/*
 * The kernel module implementing this brand is being unloaded, so remove
 * it from the list of active brands.
 */
int
brand_unregister(brand_t *brand)
{
	struct brand_list *list, *prev;

	/* Sanity checks */
	if (brand == NULL || brand->b_name == NULL) {
		cmn_err(CE_WARN, "Malformed brand");
		return (EINVAL);
	}

	prev = NULL;
	mutex_enter(&brand_list_lock);

	for (list = brand_list; list != NULL; list = list->bl_next) {
		if (list->bl_brand == brand)
			break;
		prev = list;
	}

	if (list == NULL) {
		cmn_err(CE_WARN, "Brand %s wasn't registered", brand->b_name);
		mutex_exit(&brand_list_lock);
		return (EINVAL);
	}

	if (list->bl_refcnt > 0) {
		cmn_err(CE_WARN, "Unregistering brand %s which is still in use",
		    brand->b_name);
		mutex_exit(&brand_list_lock);
		return (EBUSY);
	}

	/* Remove brand from the list */
	if (prev != NULL)
		prev->bl_next = list->bl_next;
	else
		brand_list = list->bl_next;

	mutex_exit(&brand_list_lock);

	kmem_free(list, sizeof (struct brand_list));

	return (0);
}

/*
 * Record that a zone of this brand has been instantiated.  If the kernel
 * module implementing this brand's functionality is not present, this
 * routine attempts to load the module as a side effect.
 */
brand_t *
brand_register_zone(struct brand_attr *attr)
{
	struct brand_list *l = NULL;
	ddi_modhandle_t	hdl = NULL;
	char *modname;
	int err = 0;

	if (is_system_labeled()) {
		cmn_err(CE_WARN,
		    "Branded zones are not allowed on labeled systems.");
		return (NULL);
	}

	/*
	 * We make at most two passes through this loop.  The first time
	 * through, we're looking to see if this is a new user of an
	 * already loaded brand.  If the brand hasn't been loaded, we
	 * call ddi_modopen() to force it to be loaded and then make a
	 * second pass through the list of brands.  If we don't find the
	 * brand the second time through it means that the modname
	 * specified in the brand_attr structure doesn't provide the brand
	 * specified in the brandname field.  This would suggest a bug in
	 * the brand's config.xml file.  We close the module and return
	 * 'NULL' to the caller.
	 */
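	/*
	 * Illustrative note (hypothetical name): the module name handed to
	 * ddi_modopen() below is formed by prefixing "brand/" to the
	 * ba_modname supplied in the brand_attr structure, so a brand whose
	 * configuration specifies a modname of "foo_brand" would be loaded
	 * as "brand/foo_brand".
	 */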
	for (;;) {
		/*
		 * Search list of loaded brands
		 */
		mutex_enter(&brand_list_lock);
		for (l = brand_list; l != NULL; l = l->bl_next)
			if (strcmp(attr->ba_brandname,
			    l->bl_brand->b_name) == 0)
				break;
		if ((l != NULL) || (hdl != NULL))
			break;
		mutex_exit(&brand_list_lock);

		/*
		 * The requested brand hasn't been loaded yet, so trigger
		 * the load of the appropriate kernel module and search
		 * the list again.
		 */
		modname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		(void) strcpy(modname, "brand/");
		(void) strcat(modname, attr->ba_modname);
		hdl = ddi_modopen(modname, KRTLD_MODE_FIRST, &err);
		kmem_free(modname, MAXPATHLEN);

		if (err != 0)
			return (NULL);
	}

	/*
	 * If we found the matching brand, bump its reference count.
	 */
	if (l != NULL)
		l->bl_refcnt++;

	mutex_exit(&brand_list_lock);

	if (hdl != NULL)
		(void) ddi_modclose(hdl);

	return ((l != NULL) ? l->bl_brand : NULL);
}
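
/*
 * Minimal usage sketch (hypothetical caller and brand name, assuming the
 * brand_attr fields are writable character buffers):
 *
 *	struct brand_attr attr;
 *	brand_t *bp;
 *
 *	(void) strcpy(attr.ba_brandname, "foo");
 *	(void) strcpy(attr.ba_modname, "foo_brand");
 *	if ((bp = brand_register_zone(&attr)) == NULL)
 *		... brand module missing or mismatched ...
 */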

/*
 * Return the number of zones currently using this brand.
 */
int
brand_zone_count(struct brand *bp)
{
	struct brand_list *l;
	int cnt = 0;

	mutex_enter(&brand_list_lock);
	for (l = brand_list; l != NULL; l = l->bl_next)
		if (l->bl_brand == bp) {
			cnt = l->bl_refcnt;
			break;
		}
	mutex_exit(&brand_list_lock);

	return (cnt);
}

/*
 * Record that a zone of this brand has been destroyed by dropping the
 * brand's reference count.
 */
void
brand_unregister_zone(struct brand *bp)
{
	struct brand_list *list;

	mutex_enter(&brand_list_lock);
	for (list = brand_list; list != NULL; list = list->bl_next) {
		if (list->bl_brand == bp) {
			ASSERT(list->bl_refcnt > 0);
			list->bl_refcnt--;
			break;
		}
	}
	mutex_exit(&brand_list_lock);
}

/*
 * Attach the zone's brand to a process and install the brand's
 * per-process and per-lwp handlers.
 */
void
brand_setbrand(proc_t *p)
{
	brand_t *bp = p->p_zone->zone_brand;

	ASSERT(bp != NULL);
	ASSERT(p->p_brand == &native_brand);

	/*
	 * We should only be called from exec(), when we know the process
	 * is single-threaded.
	 */
	ASSERT(p->p_tlist == p->p_tlist->t_forw);

	p->p_brand = bp;
	if (PROC_IS_BRANDED(p)) {
		BROP(p)->b_setbrand(p);
		lwp_attach_brand_hdlrs(p->p_tlist->t_lwp);
	}
}
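
/*
 * Note on the macros used above (based on <sys/brand.h> at the time of
 * this revision): PROC_IS_BRANDED(p) tests whether p_brand differs from
 * &native_brand, and BROP(p) expands to the process's brand operations
 * vector (p->p_brand->b_ops), so b_setbrand() is the brand's own
 * per-process setup hook.
 */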

#if defined(__sparcv9)
/*
 * Currently, only sparc has platform-level brand syscall interposition.
 * On x86 we're able to enable syscall interposition on a per-CPU basis
 * when a branded thread is scheduled to run on a CPU.
 */

/* Local variables needed for dynamic syscall interposition support */
static kmutex_t	brand_interposition_lock;
static int	brand_interposition_count;
static uint32_t	syscall_trap_patch_instr_orig;
static uint32_t	syscall_trap32_patch_instr_orig;

/* Trap Table syscall entry hot patch points */
extern void	syscall_trap_patch_point(void);
extern void	syscall_trap32_patch_point(void);

/* Alternate syscall entry handlers used when branded zones are running */
extern void	syscall_wrapper(void);
extern void	syscall_wrapper32(void);

/* Macros used to facilitate sparcv9 instruction generation */
#define	BA_A_INSTR	0x30800000	/* ba,a addr */
#define	DISP22(from, to) \
	((((uintptr_t)(to) - (uintptr_t)(from)) >> 2) & 0x3fffff)
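
/*
 * Worked example (illustrative addresses only): if syscall_wrapper were
 * located 0x2000 bytes beyond syscall_trap_patch_point, then
 *
 *	DISP22(from, to) = (0x2000 >> 2) & 0x3fffff = 0x800
 *	BA_A_INSTR | 0x800 = 0x30800800
 *
 * i.e. a "ba,a" whose 22-bit word displacement transfers control
 * 0x2000 bytes forward from the patch point.
 */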

void
brand_plat_interposition_init(void)
{
	mutex_init(&brand_interposition_lock, NULL, MUTEX_DEFAULT, NULL);
	brand_interposition_count = 0;
}

/*ARGSUSED*/
void
brand_plat_interposition_enable(brand_t *bp)
{
	ASSERT((bp != NULL) && (bp != &native_brand));

	mutex_enter(&brand_interposition_lock);
	ASSERT(brand_interposition_count >= 0);

	if (brand_interposition_count++ > 0) {
		mutex_exit(&brand_interposition_lock);
		return;
	}

	/*
	 * This is the first branded zone being enabled on this system.
	 *
	 * Before we hot patch the kernel, save the current instructions
	 * so that we can restore them if all branded zones on the
	 * system are shut down.
	 */
	syscall_trap_patch_instr_orig =
	    *(uint32_t *)syscall_trap_patch_point;
	syscall_trap32_patch_instr_orig =
	    *(uint32_t *)syscall_trap32_patch_point;

	/*
	 * Modify the trap table at the patch points.
	 *
	 * We basically replace the first instruction at the patch
	 * point with a ba,a instruction that will transfer control
	 * to syscall_wrapper or syscall_wrapper32 for 64-bit and
	 * 32-bit syscalls respectively.  Note that the annul bit is
	 * set in the branch, so the instruction in the branch's delay
	 * slot (the one directly following the instruction we're
	 * patching) is never executed.
	 *
	 * It also doesn't matter that we're not atomically updating the
	 * 64-bit and 32-bit syscall paths at the same time, since there
	 * are no branded processes actually running on the system yet.
	 */
	hot_patch_kernel_text((caddr_t)syscall_trap_patch_point,
	    BA_A_INSTR | DISP22(syscall_trap_patch_point, syscall_wrapper),
	    4);
	hot_patch_kernel_text((caddr_t)syscall_trap32_patch_point,
	    BA_A_INSTR | DISP22(syscall_trap32_patch_point, syscall_wrapper32),
	    4);

	mutex_exit(&brand_interposition_lock);
}

/*ARGSUSED*/
void
brand_plat_interposition_disable(brand_t *bp)
{
	ASSERT((bp != NULL) && (bp != &native_brand));

	mutex_enter(&brand_interposition_lock);
	ASSERT(brand_interposition_count > 0);

	if (--brand_interposition_count > 0) {
		mutex_exit(&brand_interposition_lock);
		return;
	}

	/*
	 * The last branded zone on this system has been shut down.
	 *
	 * Restore the original instructions at the trap table syscall
	 * patch points to disable the brand syscall interposition
	 * mechanism.
	 */
	hot_patch_kernel_text((caddr_t)syscall_trap_patch_point,
	    syscall_trap_patch_instr_orig, 4);
	hot_patch_kernel_text((caddr_t)syscall_trap32_patch_point,
	    syscall_trap32_patch_instr_orig, 4);

	mutex_exit(&brand_interposition_lock);
}
#endif /* __sparcv9 */