xref: /titanic_52/usr/src/uts/common/os/labelsys.c (revision 45916cd2fec6e79bca5dee0421bd39e3c2910d1e)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #include <sys/systm.h>
29 #include <sys/types.h>
30 #include <sys/stream.h>
31 #include <sys/kmem.h>
32 #include <sys/strsubr.h>
33 #include <sys/cmn_err.h>
34 #include <sys/debug.h>
35 #include <sys/param.h>
36 #include <sys/model.h>
37 #include <sys/errno.h>
38 #include <sys/modhash.h>
39 
40 #include <sys/policy.h>
41 #include <sys/tsol/label.h>
42 #include <sys/tsol/tsyscall.h>
43 #include <sys/tsol/tndb.h>
44 #include <sys/tsol/tnet.h>
45 #include <sys/disp.h>
46 
47 #include <inet/ip.h>
48 #include <inet/ip6.h>
49 #include <sys/sdt.h>
50 
51 static mod_hash_t *tpc_name_hash;	/* hash of cache entries by name */
52 static kmutex_t tpc_lock;
53 
54 static tsol_tpc_t *tpc_unlab;
55 
56 /*
57  * tnrhc_table and tnrhc_table_v6 are similar to the IP forwarding tables
58  * in organization and search. The tnrhc_table[_v6] is an array of 33/129
59  * pointers to the 33/129 tnrhc tables indexed by the prefix length.
60  * A largest prefix match search is done by find_rhc_v[46] and it walks the
61  * tables from the most specific to the least specific table. Table 0
62  * corresponds to the single entry for 0.0.0.0/0 or ::0/0.
63  */
64 tnrhc_hash_t *tnrhc_table[TSOL_MASK_TABLE_SIZE];
65 tnrhc_hash_t *tnrhc_table_v6[TSOL_MASK_TABLE_SIZE_V6];
66 kmutex_t tnrhc_g_lock;
67 
68 static void tsol_create_i_tmpls(void);
69 
70 static void tsol_create_i_tnrh(const tnaddr_t *);
71 
/* List of MLPs valid on shared addresses */
73 static tsol_mlp_list_t shared_mlps;
74 
75 /*
76  * Convert length for a mask to the mask.
77  */
78 static ipaddr_t
79 tsol_plen_to_mask(uint_t masklen)
80 {
81 	return (masklen == 0 ? 0 : htonl(IP_HOST_MASK << (IP_ABITS - masklen)));
82 }
83 
84 /*
85  * Convert a prefix length to the mask for that prefix.
86  * Returns the argument bitmask.
87  */
88 static void
89 tsol_plen_to_mask_v6(uint_t plen, in6_addr_t *bitmask)
90 {
91 	uint32_t *ptr;
92 
93 	ASSERT(plen <= IPV6_ABITS);
94 
95 	ptr = (uint32_t *)bitmask;
96 	while (plen >= 32) {
97 		*ptr++ = 0xffffffffU;
98 		plen -= 32;
99 	}
100 	if (plen > 0)
101 		*ptr++ = htonl(0xffffffff << (32 - plen));
102 	while (ptr < (uint32_t *)(bitmask + 1))
103 		*ptr++ = 0;
104 }
105 
106 boolean_t
107 tnrhc_init_table(tnrhc_hash_t *table[], short prefix_len, int kmflag)
108 {
109 	int	i;
110 
111 	mutex_enter(&tnrhc_g_lock);
112 
113 	if (table[prefix_len] == NULL) {
114 		table[prefix_len] = (tnrhc_hash_t *)
115 		    kmem_zalloc(TNRHC_SIZE * sizeof (tnrhc_hash_t), kmflag);
116 		if (table[prefix_len] == NULL) {
117 			mutex_exit(&tnrhc_g_lock);
118 			return (B_FALSE);
119 		}
120 		for (i = 0; i < TNRHC_SIZE; i++) {
121 			mutex_init(&table[prefix_len][i].tnrh_lock,
122 			    NULL, MUTEX_DEFAULT, 0);
123 		}
124 	}
125 	mutex_exit(&tnrhc_g_lock);
126 	return (B_TRUE);
127 }
128 
/*
 * One-time initialization of the trusted-networking caches: the
 * template (tnrhtp) name hash, the zeroth remote-host tables, the
 * internal "_unlab" template with its 0.0.0.0/::0 host entries, and
 * the shared-address MLP list lock.  Must run after label_init, since
 * the internal templates reference l_admin_low/l_admin_high.
 */
void
tcache_init(void)
{
	tnaddr_t address;

	/*
	 * Note: unable to use mod_hash_create_strhash here, since it's
	 * asymmetric.  It assumes that the user has allocated exactly
	 * strlen(key) + 1 bytes for the key when inserted, and attempts to
	 * kmem_free that memory on a delete.
	 */
	tpc_name_hash = mod_hash_create_extended("tnrhtpc_by_name", 256,
	    mod_hash_null_keydtor,  mod_hash_null_valdtor, mod_hash_bystr,
	    NULL, mod_hash_strkey_cmp, KM_SLEEP);
	mutex_init(&tpc_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&tnrhc_g_lock, NULL, MUTEX_DEFAULT, NULL);

	/* label_init always called before tcache_init */
	ASSERT(l_admin_low != NULL && l_admin_high != NULL);

	/* Initialize the zeroth table prior to loading the 0.0.0.0 entry */
	(void) tnrhc_init_table(tnrhc_table, 0, KM_SLEEP);
	(void) tnrhc_init_table(tnrhc_table_v6, 0, KM_SLEEP);
	/*
	 * create an internal host template called "_unlab"
	 */
	tsol_create_i_tmpls();

	/*
	 * create a host entry, 0.0.0.0 = _unlab
	 */
	bzero(&address, sizeof (tnaddr_t));
	address.ta_family = AF_INET;
	tsol_create_i_tnrh(&address);

	/*
	 * create a host entry, ::0 = _unlab
	 */
	address.ta_family = AF_INET6;
	tsol_create_i_tnrh(&address);

	rw_init(&shared_mlps.mlpl_rwlock, NULL, RW_DEFAULT, NULL);
}
173 
/* Called only by the TNRHC_RELE macro when the refcount goes to zero. */
void
tnrhc_free(tsol_tnrhc_t *tnrhc)
{
	/*
	 * We assert rhc_invalid here to make sure that no new thread could
	 * possibly end up finding this entry.  If it could, then the
	 * mutex_destroy would panic.
	 */
	DTRACE_PROBE1(tx__tndb__l3__tnrhcfree, tsol_tnrhc_t *, tnrhc);
	ASSERT(tnrhc->rhc_next == NULL && tnrhc->rhc_invalid);
	/* Entered with rhc_lock held (by TNRHC_RELE); drop before destroy. */
	mutex_exit(&tnrhc->rhc_lock);
	mutex_destroy(&tnrhc->rhc_lock);
	/* Release the template reference this host entry owned, if any. */
	if (tnrhc->rhc_tpc != NULL)
		TPC_RELE(tnrhc->rhc_tpc);
	kmem_free(tnrhc, sizeof (*tnrhc));
}
191 
/* Called only by the TPC_RELE macro when the refcount goes to zero. */
void
tpc_free(tsol_tpc_t *tpc)
{
	DTRACE_PROBE1(tx__tndb__l3__tpcfree, tsol_tpc_t *, tpc);
	/* Must already be invalidated so no new lookup can find it. */
	ASSERT(tpc->tpc_invalid);
	/* Entered with tpc_lock held (by TPC_RELE); drop before destroy. */
	mutex_exit(&tpc->tpc_lock);
	mutex_destroy(&tpc->tpc_lock);
	kmem_free(tpc, sizeof (*tpc));
}
202 
203 /*
204  * Find and hold a reference to a template entry by name.  Ignores entries that
205  * are being deleted.
206  */
207 static tsol_tpc_t *
208 tnrhtp_find(const char *name, mod_hash_t *hash)
209 {
210 	mod_hash_val_t hv;
211 	tsol_tpc_t *tpc = NULL;
212 
213 	mutex_enter(&tpc_lock);
214 	if (mod_hash_find(hash, (mod_hash_key_t)name, &hv) == 0) {
215 		tpc = (tsol_tpc_t *)hv;
216 		if (tpc->tpc_invalid)
217 			tpc = NULL;
218 		else
219 			TPC_HOLD(tpc);
220 	}
221 	mutex_exit(&tpc_lock);
222 	return (tpc);
223 }
224 
/*
 * Delete the remote-host cache entry whose address exactly matches
 * rhent (family, prefix length, and host address).  Returns 0 on
 * success, EINVAL for an out-of-range prefix, ENOENT when no table or
 * matching entry exists, or EAFNOSUPPORT for an unknown family.
 */
static int
tnrh_delete(const tsol_rhent_t *rhent)
{
	tsol_tnrhc_t *current;
	tsol_tnrhc_t **prevp;
	ipaddr_t tmpmask;
	in6_addr_t tmpmask_v6;
	tnrhc_hash_t *tnrhc_hash;

	if (rhent->rh_address.ta_family == AF_INET) {
		if (rhent->rh_prefix < 0 || rhent->rh_prefix > IP_ABITS)
			return (EINVAL);
		if (tnrhc_table[rhent->rh_prefix] == NULL)
			return (ENOENT);
		/* Hash on the masked address within the per-prefix table. */
		tmpmask = tsol_plen_to_mask(rhent->rh_prefix);
		tnrhc_hash = &tnrhc_table[rhent->rh_prefix][
		    TSOL_ADDR_HASH(rhent->rh_address.ta_addr_v4.s_addr &
		    tmpmask, TNRHC_SIZE)];
	} else if (rhent->rh_address.ta_family == AF_INET6) {
		if (rhent->rh_prefix < 0 || rhent->rh_prefix > IPV6_ABITS)
			return (EINVAL);
		if (tnrhc_table_v6[rhent->rh_prefix] == NULL)
			return (ENOENT);
		tsol_plen_to_mask_v6(rhent->rh_prefix, &tmpmask_v6);
		tnrhc_hash = &tnrhc_table_v6[rhent->rh_prefix][
		    TSOL_ADDR_MASK_HASH_V6(rhent->rh_address.ta_addr_v6,
		    tmpmask_v6, TNRHC_SIZE)];
	} else {
		return (EAFNOSUPPORT);
	}

	/* search for existing entry */
	mutex_enter(&tnrhc_hash->tnrh_lock);
	prevp = &tnrhc_hash->tnrh_list;
	while ((current = *prevp) != NULL) {
		if (TNADDR_EQ(&rhent->rh_address, &current->rhc_host))
			break;
		prevp = &current->rhc_next;
	}

	if (current != NULL) {
		DTRACE_PROBE(tx__tndb__l2__tnrhdelete_existingrhentry);
		/*
		 * Unlink the entry and mark it invalid under its own lock
		 * so no lookup can find it again, then drop the list's
		 * reference; tnrhc_free runs when the last holder lets go.
		 */
		*prevp = current->rhc_next;
		mutex_enter(&current->rhc_lock);
		current->rhc_next = NULL;
		current->rhc_invalid = 1;
		mutex_exit(&current->rhc_lock);
		TNRHC_RELE(current);
	}
	mutex_exit(&tnrhc_hash->tnrh_lock);
	return (current == NULL ? ENOENT : 0);
}
277 
/*
 * Flush all remote host entries from the database.
 *
 * Note that the htable arrays themselves do not have reference counters, so,
 * unlike the remote host entries, they cannot be freed.
 *
 * htable is the per-prefix-length table array (tnrhc_table or
 * tnrhc_table_v6) and nbits the number of prefix slots it holds.
 */
static void
flush_rh_table(tnrhc_hash_t **htable, int nbits)
{
	tnrhc_hash_t *hent, *hend;
	tsol_tnrhc_t *rhc, *rhnext;

	while (--nbits >= 0) {
		if ((hent = htable[nbits]) == NULL)
			continue;
		hend = hent + TNRHC_SIZE;
		while (hent < hend) {
			/*
			 * List walkers hold this lock during the walk.  It
			 * protects tnrh_list and rhc_next.
			 */
			mutex_enter(&hent->tnrh_lock);
			rhnext = hent->tnrh_list;
			hent->tnrh_list = NULL;
			mutex_exit(&hent->tnrh_lock);
			/*
			 * There may still be users of the rhcs at this point,
			 * but not of the list or its next pointer.  Thus, the
			 * only thing that would need to be done under a lock
			 * is setting the invalid bit, but that's atomic
			 * anyway, so no locks needed here.
			 */
			while ((rhc = rhnext) != NULL) {
				rhnext = rhc->rhc_next;
				rhc->rhc_next = NULL;
				rhc->rhc_invalid = 1;
				TNRHC_RELE(rhc);
			}
			hent++;
		}
	}
}
320 
/*
 * Load a remote host entry into kernel cache.  Create a new one if a matching
 * entry isn't found, otherwise replace the contents of the previous one by
 * deleting it and recreating it.  (Delete and recreate is used to avoid
 * allowing other threads to see an unstable data structure.)
 *
 * A "matching" entry is the one whose address matches that of the one
 * being loaded.
 *
 * Return 0 for success, error code for failure: EINVAL for a bad
 * prefix or unknown template name, ENOMEM on allocation failure, or
 * EAFNOSUPPORT for an unknown address family.
 */
int
tnrh_load(const tsol_rhent_t *rhent)
{
	tsol_tnrhc_t **rhp;
	tsol_tnrhc_t *rh, *new;
	tsol_tpc_t *tpc;
	ipaddr_t tmpmask;
	in6_addr_t tmpmask_v6;
	tnrhc_hash_t *tnrhc_hash;

	/* Find the existing entry, if any, leaving the hash locked */
	if (rhent->rh_address.ta_family == AF_INET) {
		if (rhent->rh_prefix < 0 || rhent->rh_prefix > IP_ABITS)
			return (EINVAL);
		if (tnrhc_table[rhent->rh_prefix] == NULL &&
		    !tnrhc_init_table(tnrhc_table, rhent->rh_prefix,
		    KM_NOSLEEP))
			return (ENOMEM);
		tmpmask = tsol_plen_to_mask(rhent->rh_prefix);
		tnrhc_hash = &tnrhc_table[rhent->rh_prefix][
		    TSOL_ADDR_HASH(rhent->rh_address.ta_addr_v4.s_addr &
		    tmpmask, TNRHC_SIZE)];
		mutex_enter(&tnrhc_hash->tnrh_lock);
		/* Match on the address bits covered by the prefix mask. */
		for (rhp = &tnrhc_hash->tnrh_list; (rh = *rhp) != NULL;
		    rhp = &rh->rhc_next) {
			ASSERT(rh->rhc_host.ta_family == AF_INET);
			if (((rh->rhc_host.ta_addr_v4.s_addr ^
			    rhent->rh_address.ta_addr_v4.s_addr) & tmpmask) ==
			    0)
				break;
		}
	} else if (rhent->rh_address.ta_family == AF_INET6) {
		if (rhent->rh_prefix < 0 || rhent->rh_prefix > IPV6_ABITS)
			return (EINVAL);
		if (tnrhc_table_v6[rhent->rh_prefix] == NULL &&
		    !tnrhc_init_table(tnrhc_table_v6, rhent->rh_prefix,
		    KM_NOSLEEP))
			return (ENOMEM);
		tsol_plen_to_mask_v6(rhent->rh_prefix, &tmpmask_v6);
		tnrhc_hash = &tnrhc_table_v6[rhent->rh_prefix][
		    TSOL_ADDR_MASK_HASH_V6(rhent->rh_address.ta_addr_v6,
		    tmpmask_v6, TNRHC_SIZE)];
		mutex_enter(&tnrhc_hash->tnrh_lock);
		for (rhp = &tnrhc_hash->tnrh_list; (rh = *rhp) != NULL;
		    rhp = &rh->rhc_next) {
			ASSERT(rh->rhc_host.ta_family == AF_INET6);
			if (V6_MASK_EQ_2(rh->rhc_host.ta_addr_v6, tmpmask_v6,
			    rhent->rh_address.ta_addr_v6))
				break;
		}
	} else {
		return (EAFNOSUPPORT);
	}

	/* Allocate before touching the list so failure leaves it intact. */
	if ((new = kmem_zalloc(sizeof (*new), KM_NOSLEEP)) == NULL) {
		mutex_exit(&tnrhc_hash->tnrh_lock);
		return (ENOMEM);
	}

	/* Find and bump the reference count on the named template */
	if ((tpc = tnrhtp_find(rhent->rh_template, tpc_name_hash)) == NULL) {
		mutex_exit(&tnrhc_hash->tnrh_lock);
		kmem_free(new, sizeof (*new));
		return (EINVAL);
	}

	/* Clobber the old remote host entry. */
	if (rh != NULL) {
		ASSERT(!rh->rhc_invalid);
		rh->rhc_invalid = 1;
		*rhp = rh->rhc_next;
		rh->rhc_next = NULL;
		TNRHC_RELE(rh);
	}

	/* Initialize the new entry. */
	mutex_init(&new->rhc_lock, NULL, MUTEX_DEFAULT, NULL);
	new->rhc_host = rhent->rh_address;

	/* The rhc now owns this tpc reference, so no TPC_RELE past here */
	new->rhc_tpc = tpc;

	ASSERT(tpc->tpc_tp.host_type == UNLABELED ||
	    tpc->tpc_tp.host_type == SUN_CIPSO);

	/* Insert at the head of the bucket, holding the list's reference. */
	TNRHC_HOLD(new);
	new->rhc_next = tnrhc_hash->tnrh_list;
	tnrhc_hash->tnrh_list = new;
	DTRACE_PROBE(tx__tndb__l2__tnrhload__addedrh);
	mutex_exit(&tnrhc_hash->tnrh_lock);

	return (0);
}
425 
426 static int
427 tnrh_get(tsol_rhent_t *rhent)
428 {
429 	tsol_tpc_t *tpc;
430 
431 	switch (rhent->rh_address.ta_family) {
432 	case AF_INET:
433 		tpc = find_tpc(&rhent->rh_address.ta_addr_v4, IPV4_VERSION,
434 		    B_TRUE);
435 		break;
436 
437 	case AF_INET6:
438 		tpc = find_tpc(&rhent->rh_address.ta_addr_v6, IPV6_VERSION,
439 		    B_TRUE);
440 		break;
441 
442 	default:
443 		return (EINVAL);
444 	}
445 	if (tpc == NULL)
446 		return (ENOENT);
447 
448 	DTRACE_PROBE2(tx__tndb__l4__tnrhget__foundtpc, tsol_rhent_t *,
449 	    rhent, tsol_tpc_t *, tpc);
450 	bcopy(tpc->tpc_tp.name, rhent->rh_template,
451 	    sizeof (rhent->rh_template));
452 	TPC_RELE(tpc);
453 	return (0);
454 }
455 
456 static boolean_t
457 template_name_ok(const char *name)
458 {
459 	const char *name_end = name + TNTNAMSIZ;
460 
461 	while (name < name_end) {
462 		if (*name == '\0')
463 			break;
464 		name++;
465 	}
466 	return (name < name_end);
467 }
468 
469 static int
470 tnrh(int cmd, void *buf)
471 {
472 	int retv;
473 	tsol_rhent_t rhent;
474 
475 	/* Make sure user has sufficient privilege */
476 	if (cmd != TNDB_GET &&
477 	    (retv = secpolicy_net_config(CRED(), B_FALSE)) != 0)
478 		return (set_errno(retv));
479 
480 	/*
481 	 * Get arguments
482 	 */
483 	if (cmd != TNDB_FLUSH &&
484 	    copyin(buf, &rhent, sizeof (rhent)) != 0) {
485 		DTRACE_PROBE(tx__tndb__l0__tnrhdelete__copyin);
486 		return (set_errno(EFAULT));
487 	}
488 
489 	switch (cmd) {
490 	case TNDB_LOAD:
491 		DTRACE_PROBE(tx__tndb__l2__tnrhdelete__tndbload);
492 		if (!template_name_ok(rhent.rh_template)) {
493 			retv = EINVAL;
494 		} else {
495 			retv = tnrh_load(&rhent);
496 		}
497 		break;
498 
499 	case TNDB_DELETE:
500 		DTRACE_PROBE(tx__tndb__l2__tnrhdelete__tndbdelete);
501 		retv = tnrh_delete(&rhent);
502 		break;
503 
504 	case TNDB_GET:
505 		DTRACE_PROBE(tx__tndb__l4__tnrhdelete__tndbget);
506 		if (!template_name_ok(rhent.rh_template)) {
507 			retv = EINVAL;
508 			break;
509 		}
510 
511 		retv = tnrh_get(&rhent);
512 		if (retv != 0)
513 			break;
514 
515 		/*
516 		 * Copy out result
517 		 */
518 		if (copyout(&rhent, buf, sizeof (rhent)) != 0) {
519 			DTRACE_PROBE(tx__tndb__l0__tnrhdelete__copyout);
520 			retv = EFAULT;
521 		}
522 		break;
523 
524 	case TNDB_FLUSH:
525 		DTRACE_PROBE(tx__tndb__l2__tnrhdelete__flush);
526 		flush_rh_table(tnrhc_table, TSOL_MASK_TABLE_SIZE);
527 		flush_rh_table(tnrhc_table_v6, TSOL_MASK_TABLE_SIZE_V6);
528 		break;
529 
530 	default:
531 		DTRACE_PROBE1(tx__tndb__l0__tnrhdelete__unknowncmd,
532 		    int, cmd);
533 		retv = EOPNOTSUPP;
534 		break;
535 	}
536 
537 	if (retv != 0)
538 		return (set_errno(retv));
539 	else
540 		return (retv);
541 }
542 
/*
 * Insert (or replace) the template described by tpent into the name
 * hash, returning it with the hash's reference held.  An existing
 * entry of the same name is invalidated and released first, so
 * readers never observe a half-updated template.  Returns NULL only
 * when the allocation fails (possible with KM_NOSLEEP kmflags).
 */
static tsol_tpc_t *
tnrhtp_create(const tsol_tpent_t *tpent, int kmflags)
{
	tsol_tpc_t *tpc;
	mod_hash_val_t hv;

	/*
	 * We intentionally allocate a new entry before taking the lock on the
	 * entire database.
	 */
	if ((tpc = kmem_zalloc(sizeof (*tpc), kmflags)) == NULL)
		return (NULL);

	mutex_enter(&tpc_lock);
	/* Replace semantics: retire any same-named entry first. */
	if (mod_hash_find(tpc_name_hash, (mod_hash_key_t)tpent->name,
	    &hv) == 0) {
		tsol_tpc_t *found_tpc = (tsol_tpc_t *)hv;

		found_tpc->tpc_invalid = 1;
		(void) mod_hash_destroy(tpc_name_hash,
		    (mod_hash_key_t)tpent->name);
		TPC_RELE(found_tpc);
	}

	mutex_init(&tpc->tpc_lock, NULL, MUTEX_DEFAULT, NULL);
	/* tsol_tpent_t is the same on LP64 and ILP32 */
	bcopy(tpent, &tpc->tpc_tp, sizeof (tpc->tpc_tp));
	(void) mod_hash_insert(tpc_name_hash, (mod_hash_key_t)tpc->tpc_tp.name,
	    (mod_hash_val_t)tpc);
	TPC_HOLD(tpc);
	mutex_exit(&tpc_lock);

	return (tpc);
}
577 
578 static int
579 tnrhtp_delete(const char *tname)
580 {
581 	tsol_tpc_t *tpc;
582 	mod_hash_val_t hv;
583 	int retv = ENOENT;
584 
585 	mutex_enter(&tpc_lock);
586 	if (mod_hash_find(tpc_name_hash, (mod_hash_key_t)tname, &hv) == 0) {
587 		tpc = (tsol_tpc_t *)hv;
588 		ASSERT(!tpc->tpc_invalid);
589 		tpc->tpc_invalid = 1;
590 		(void) mod_hash_destroy(tpc_name_hash,
591 		    (mod_hash_key_t)tpc->tpc_tp.name);
592 		TPC_RELE(tpc);
593 		retv = 0;
594 	}
595 	mutex_exit(&tpc_lock);
596 	return (retv);
597 }
598 
599 /* ARGSUSED */
600 static uint_t
601 tpc_delete(mod_hash_key_t key, mod_hash_val_t *val, void *arg)
602 {
603 	tsol_tpc_t *tpc = (tsol_tpc_t *)val;
604 
605 	ASSERT(!tpc->tpc_invalid);
606 	tpc->tpc_invalid = 1;
607 	TPC_RELE(tpc);
608 	return (MH_WALK_CONTINUE);
609 }
610 
/*
 * Flush every template: invalidate and release each entry via the
 * tpc_delete walker, then empty the name hash itself.
 */
static void
tnrhtp_flush(void)
{
	mutex_enter(&tpc_lock);
	mod_hash_walk(tpc_name_hash, tpc_delete, NULL);
	mod_hash_clear(tpc_name_hash);
	mutex_exit(&tpc_lock);
}
619 
/*
 * System-call backend for template (tnrhtp) database operations:
 * TNDB_LOAD, TNDB_GET, TNDB_DELETE, and TNDB_FLUSH.  buf points to a
 * user-space tsol_tpent_t for every command except TNDB_FLUSH.  All
 * commands except TNDB_GET require net_config privilege.  Returns 0
 * on success; otherwise sets errno and returns the error value.
 */
static int
tnrhtp(int cmd, void *buf)
{
	int retv;
	int type;
	tsol_tpent_t rhtpent;
	tsol_tpc_t *tpc;

	/* Make sure user has sufficient privilege */
	if (cmd != TNDB_GET &&
	    (retv = secpolicy_net_config(CRED(), B_FALSE)) != 0)
		return (set_errno(retv));

	/*
	 * Get argument.  Note that tsol_tpent_t is the same on LP64 and ILP32,
	 * so no special handling is required.
	 */
	if (cmd != TNDB_FLUSH) {
		if (copyin(buf, &rhtpent, sizeof (rhtpent)) != 0) {
			DTRACE_PROBE(tx__tndb__l0__tnrhtp__copyin);
			return (set_errno(EFAULT));
		}

		/*
		 * Don't let the user give us a bogus (unterminated) template
		 * name.
		 */
		if (!template_name_ok(rhtpent.name))
			return (set_errno(EINVAL));
	}

	switch (cmd) {
	case TNDB_LOAD:
		DTRACE_PROBE1(tx__tndb__l2__tnrhtp__tndbload, char *,
			rhtpent.name);
		/* Only the two supported host types may be loaded. */
		type = rhtpent.host_type;
		if (type != UNLABELED && type != SUN_CIPSO) {
			retv = EINVAL;
			break;
		}

		if (tnrhtp_create(&rhtpent, KM_NOSLEEP) == NULL)
			retv = ENOMEM;
		else
			retv = 0;
		break;

	case TNDB_GET:
		DTRACE_PROBE1(tx__tndb__l4__tnrhtp__tndbget, char *,
		    rhtpent.name);
		tpc = tnrhtp_find(rhtpent.name, tpc_name_hash);
		if (tpc == NULL) {
			retv = ENOENT;
			break;
		}

		/* Copy out result */
		if (copyout(&tpc->tpc_tp, buf, sizeof (tpc->tpc_tp)) != 0) {
			DTRACE_PROBE(tx__tndb__l0__tnrhtp__copyout);
			retv = EFAULT;
		} else {
			retv = 0;
		}
		TPC_RELE(tpc);
		break;

	case TNDB_DELETE:
		DTRACE_PROBE1(tx__tndb__l4__tnrhtp__tndbdelete, char *,
		    rhtpent.name);
		retv = tnrhtp_delete(rhtpent.name);
		break;

	case TNDB_FLUSH:
		DTRACE_PROBE(tx__tndb__l4__tnrhtp__flush);
		tnrhtp_flush();
		retv = 0;
		break;

	default:
		DTRACE_PROBE1(tx__tndb__l0__tnrhtp__unknowncmd, int,
		    cmd);
		retv = EOPNOTSUPP;
		break;
	}

	if (retv != 0)
		return (set_errno(retv));
	else
		return (retv);
}
710 
/*
 * MLP entry ordering logic
 *
 * There are two loops in this routine.  The first loop finds the entry that
 * either logically follows the new entry to be inserted, or is the entry that
 * precedes and overlaps the new entry, or is NULL to mean end-of-list.  This
 * is 'tme.'  The second loop scans ahead from that point to find any overlap
 * on the front or back of this new entry.
 *
 * For the first loop, we can have the following cases in the list (note that
 * the port-portmax range is inclusive):
 *
 *	       port   portmax
 *		+--------+
 * 1: +------+ ................... precedes; skip to next
 * 2:	    +------+ ............. overlaps; stop here if same protocol
 * 3:		+------+ ......... overlaps; stop if same or higher protocol
 * 4:		    +-------+ .... overlaps or succeeds; stop here
 *
 * For the second loop, we can have the following cases (note that we need not
 * care about other protocol entries at this point, because we're only looking
 * for overlap, not an insertion point):
 *
 *	       port   portmax
 *		+--------+
 * 5:	    +------+ ............. overlaps; stop if same protocol
 * 6:		+------+ ......... overlaps; stop if same protocol
 * 7:		    +-------+ .... overlaps; stop if same protocol
 * 8:			   +---+ . follows; search is done
 *
 * In other words, this second search needs to consider only whether the entry
 * has a starting port number that's greater than the end point of the new
 * entry.  All others are overlaps.
 *
 * When addflag is set, inserts a new [port, portmax] entry for proto
 * owned by zoneid, failing with EEXIST on any overlap or ENOMEM on
 * allocation failure.  Otherwise deletes the exactly-matching entry,
 * failing with ENOENT when none exists.  Returns 0 on success.
 */
static int
mlp_add_del(tsol_mlp_list_t *mlpl, zoneid_t zoneid, uint8_t proto,
    uint16_t port, uint16_t portmax, boolean_t addflag)
{
	int retv;
	tsol_mlp_entry_t *tme, *tme2, *newent;

	/* Allocate before taking the lock; KM_NOSLEEP since we can fail. */
	if (addflag) {
		if ((newent = kmem_zalloc(sizeof (*newent), KM_NOSLEEP)) ==
		    NULL)
			return (ENOMEM);
	} else {
		newent = NULL;
	}
	rw_enter(&mlpl->mlpl_rwlock, RW_WRITER);

	/*
	 * First loop: find logical insertion point or overlap.  Table is kept
	 * in order of port number first, and then, within that, by protocol
	 * number.
	 */
	for (tme = mlpl->mlpl_first; tme != NULL; tme = tme->mlpe_next) {
		/* logically next (case 4) */
		if (tme->mlpe_mlp.mlp_port > port)
			break;
		/* if this is logically next or overlap, then stop (case 3) */
		if (tme->mlpe_mlp.mlp_port == port &&
		    tme->mlpe_mlp.mlp_ipp >= proto)
			break;
		/* earlier or same port sequence; check for overlap (case 2) */
		if (tme->mlpe_mlp.mlp_ipp == proto &&
		    tme->mlpe_mlp.mlp_port_upper >= port)
			break;
		/* otherwise, loop again (case 1) */
	}

	/* Second loop: scan ahead for overlap */
	for (tme2 = tme; tme2 != NULL; tme2 = tme2->mlpe_next) {
		/* check if entry follows; no overlap (case 8) */
		if (tme2->mlpe_mlp.mlp_port > portmax) {
			tme2 = NULL;
			break;
		}
		/* only exact protocol matches at this point (cases 5-7) */
		if (tme2->mlpe_mlp.mlp_ipp == proto)
			break;
	}

	retv = 0;
	if (addflag) {
		if (tme2 != NULL) {
			retv = EEXIST;
		} else {
			/* Link newent in just before tme (or at the tail). */
			newent->mlpe_zoneid = zoneid;
			newent->mlpe_mlp.mlp_ipp = proto;
			newent->mlpe_mlp.mlp_port = port;
			newent->mlpe_mlp.mlp_port_upper = portmax;
			newent->mlpe_next = tme;
			if (tme == NULL) {
				tme2 = mlpl->mlpl_last;
				mlpl->mlpl_last = newent;
			} else {
				tme2 = tme->mlpe_prev;
				tme->mlpe_prev = newent;
			}
			newent->mlpe_prev = tme2;
			if (tme2 == NULL)
				mlpl->mlpl_first = newent;
			else
				tme2->mlpe_next = newent;
			newent = NULL;
		}
	} else {
		if (tme2 == NULL || tme2->mlpe_mlp.mlp_port != port ||
		    tme2->mlpe_mlp.mlp_port_upper != portmax) {
			retv = ENOENT;
		} else {
			/*
			 * NOTE(review): the unlink below operates on tme
			 * (first-loop result) after validating tme2 (second
			 * loop).  In the cases that reach here the two
			 * appear to coincide; confirm tme == tme2 holds for
			 * all inputs.
			 */
			if ((tme2 = tme->mlpe_prev) == NULL)
				mlpl->mlpl_first = tme->mlpe_next;
			else
				tme2->mlpe_next = tme->mlpe_next;
			if ((tme2 = tme->mlpe_next) == NULL)
				mlpl->mlpl_last = tme->mlpe_prev;
			else
				tme2->mlpe_prev = tme->mlpe_prev;
			newent = tme;
		}
	}
	rw_exit(&mlpl->mlpl_rwlock);

	/* Free either the unused new entry or the unlinked old one. */
	if (newent != NULL)
		kmem_free(newent, sizeof (*newent));

	return (retv);
}
840 
841 /*
842  * Add or remove an MLP entry from the database so that the classifier can find
843  * it.
844  *
845  * Note: port number is in host byte order.
846  */
847 int
848 tsol_mlp_anon(zone_t *zone, mlp_type_t mlptype, uchar_t proto, uint16_t port,
849     boolean_t addflag)
850 {
851 	int retv = 0;
852 
853 	if (mlptype == mlptBoth || mlptype == mlptPrivate)
854 		retv = mlp_add_del(&zone->zone_mlps, zone->zone_id, proto,
855 		    port, port, addflag);
856 	if ((retv == 0 || !addflag) &&
857 	    (mlptype == mlptBoth || mlptype == mlptShared)) {
858 		retv = mlp_add_del(&shared_mlps, zone->zone_id, proto, port,
859 		    port, addflag);
860 		if (retv != 0 && addflag)
861 			(void) mlp_add_del(&zone->zone_mlps, zone->zone_id,
862 			    proto, port, port, B_FALSE);
863 	}
864 	return (retv);
865 }
866 
/*
 * Remove and free every MLP entry on mlpl that belongs to zoneid, or
 * all entries when zoneid is ALL_ZONES.
 */
static void
mlp_flush(tsol_mlp_list_t *mlpl, zoneid_t zoneid)
{
	tsol_mlp_entry_t *tme, *tme2, *tmnext;

	rw_enter(&mlpl->mlpl_rwlock, RW_WRITER);
	for (tme = mlpl->mlpl_first; tme != NULL; tme = tmnext) {
		tmnext = tme->mlpe_next;
		if (zoneid == ALL_ZONES || tme->mlpe_zoneid == zoneid) {
			/* Unlink tme from the doubly linked list. */
			if ((tme2 = tme->mlpe_prev) == NULL)
				mlpl->mlpl_first = tmnext;
			else
				tme2->mlpe_next = tmnext;
			if (tmnext == NULL)
				mlpl->mlpl_last = tme2;
			else
				tmnext->mlpe_prev = tme2;
			kmem_free(tme, sizeof (*tme));
		}
	}
	rw_exit(&mlpl->mlpl_rwlock);
}
889 
/*
 * System-call backend for multilevel-port (MLP) database operations:
 * TNDB_LOAD, TNDB_GET, TNDB_DELETE, and TNDB_FLUSH on either a zone's
 * private MLP list or the shared-address list (TSOL_MEF_SHARED).
 *
 * Note: user supplies port numbers in host byte order.
 */
static int
tnmlp(int cmd, void *buf)
{
	int retv;
	tsol_mlpent_t tsme;
	zone_t *zone;
	tsol_mlp_list_t *mlpl;
	tsol_mlp_entry_t *tme;

	/* Make sure user has sufficient privilege */
	if (cmd != TNDB_GET &&
	    (retv = secpolicy_net_config(CRED(), B_FALSE)) != 0)
		return (set_errno(retv));

	/*
	 * Get argument.  Note that tsol_mlpent_t is the same on LP64 and
	 * ILP32, so no special handling is required.
	 */
	if (copyin(buf, &tsme, sizeof (tsme)) != 0) {
		DTRACE_PROBE(tx__tndb__l0__tnmlp__copyin);
		return (set_errno(EFAULT));
	}

	/* MLPs on shared IP addresses */
	if (tsme.tsme_flags & TSOL_MEF_SHARED) {
		zone = NULL;
		mlpl = &shared_mlps;
	} else {
		zone = zone_find_by_id(tsme.tsme_zoneid);
		if (zone == NULL)
			return (set_errno(EINVAL));
		mlpl = &zone->zone_mlps;
	}
	/* A zero upper bound means a single-port range. */
	if (tsme.tsme_mlp.mlp_port_upper == 0)
		tsme.tsme_mlp.mlp_port_upper = tsme.tsme_mlp.mlp_port;

	switch (cmd) {
	case TNDB_LOAD:
		DTRACE_PROBE1(tx__tndb__l2__tnmlp__tndbload,
		    tsol_mlpent_t *, &tsme);
		if (tsme.tsme_mlp.mlp_ipp == 0 || tsme.tsme_mlp.mlp_port == 0 ||
		    tsme.tsme_mlp.mlp_port > tsme.tsme_mlp.mlp_port_upper) {
			retv = EINVAL;
			break;
		}
		retv = mlp_add_del(mlpl, tsme.tsme_zoneid,
		    tsme.tsme_mlp.mlp_ipp, tsme.tsme_mlp.mlp_port,
		    tsme.tsme_mlp.mlp_port_upper, B_TRUE);
		break;

	case TNDB_GET:
		DTRACE_PROBE1(tx__tndb__l2__tnmlp__tndbget,
		    tsol_mlpent_t *, &tsme);

		/*
		 * Search for the requested element or, failing that, the one
		 * that's logically next in the sequence.
		 */
		rw_enter(&mlpl->mlpl_rwlock, RW_READER);
		for (tme = mlpl->mlpl_first; tme != NULL;
		    tme = tme->mlpe_next) {
			if (tsme.tsme_zoneid != ALL_ZONES &&
			    tme->mlpe_zoneid != tsme.tsme_zoneid)
				continue;
			if (tme->mlpe_mlp.mlp_ipp >= tsme.tsme_mlp.mlp_ipp &&
			    tme->mlpe_mlp.mlp_port == tsme.tsme_mlp.mlp_port)
				break;
			if (tme->mlpe_mlp.mlp_port > tsme.tsme_mlp.mlp_port)
				break;
		}
		if (tme == NULL) {
			retv = ENOENT;
		} else {
			tsme.tsme_zoneid = tme->mlpe_zoneid;
			tsme.tsme_mlp = tme->mlpe_mlp;
			retv = 0;
		}
		rw_exit(&mlpl->mlpl_rwlock);
		break;

	case TNDB_DELETE:
		DTRACE_PROBE1(tx__tndb__l4__tnmlp__tndbdelete,
		    tsol_mlpent_t *, &tsme);
		retv = mlp_add_del(mlpl, tsme.tsme_zoneid,
		    tsme.tsme_mlp.mlp_ipp, tsme.tsme_mlp.mlp_port,
		    tsme.tsme_mlp.mlp_port_upper, B_FALSE);
		break;

	case TNDB_FLUSH:
		DTRACE_PROBE1(tx__tndb__l4__tnmlp__tndbflush,
		    tsol_mlpent_t *, &tsme);
		/* Flush the selected list entirely, then prune the
		 * shared list of this zone's entries. */
		mlp_flush(mlpl, ALL_ZONES);
		mlp_flush(&shared_mlps, tsme.tsme_zoneid);
		retv = 0;
		break;

	default:
		DTRACE_PROBE1(tx__tndb__l0__tnmlp__unknowncmd, int,
		    cmd);
		retv = EOPNOTSUPP;
		break;
	}

	if (zone != NULL)
		zone_rele(zone);

	if (cmd == TNDB_GET && retv == 0) {
		/* Copy out result */
		if (copyout(&tsme, buf, sizeof (tsme)) != 0) {
			DTRACE_PROBE(tx__tndb__l0__tnmlp__copyout);
			retv = EFAULT;
		}
	}

	if (retv != 0)
		return (set_errno(retv));
	else
		return (retv);
}
1012 
/*
 * Returns a tnrhc matching the addr address.
 * The returned rhc's refcnt is incremented.
 *
 * Longest-prefix-match search: walks the per-prefix-length tables from
 * most specific (/32) to least (/0) and returns the first entry whose
 * masked address equals the masked lookup address.
 */
tsol_tnrhc_t *
find_rhc_v4(const in_addr_t *in4)
{
	tsol_tnrhc_t *rh = NULL;
	tnrhc_hash_t *tnrhc_hash;
	ipaddr_t tmpmask;
	int	i;

	for (i = (TSOL_MASK_TABLE_SIZE - 1); i >= 0; i--) {

		/* Skip prefix lengths with no loaded entries. */
		if ((tnrhc_table[i]) == NULL)
			continue;

		tmpmask = tsol_plen_to_mask(i);
		tnrhc_hash = &tnrhc_table[i][
		    TSOL_ADDR_HASH(*in4 & tmpmask, TNRHC_SIZE)];

		mutex_enter(&tnrhc_hash->tnrh_lock);
		for (rh = tnrhc_hash->tnrh_list; rh != NULL;
		    rh = rh->rhc_next) {
			if ((rh->rhc_host.ta_family == AF_INET) &&
			    ((rh->rhc_host.ta_addr_v4.s_addr & tmpmask) ==
			    (*in4 & tmpmask))) {
				TNRHC_HOLD(rh);
				mutex_exit(&tnrhc_hash->tnrh_lock);
				return (rh);
			}
		}
		mutex_exit(&tnrhc_hash->tnrh_lock);
	}

	return (NULL);
}
1050 
/*
 * Returns a tnrhc matching the addr address.
 * The returned rhc's refcnt is incremented.
 *
 * V4-mapped addresses are redirected to the IPv4 search; otherwise a
 * longest-prefix-match walk from /128 down to /0, as in find_rhc_v4.
 */
tsol_tnrhc_t *
find_rhc_v6(const in6_addr_t *in6)
{
	tsol_tnrhc_t *rh = NULL;
	tnrhc_hash_t *tnrhc_hash;
	in6_addr_t tmpmask;
	int i;

	if (IN6_IS_ADDR_V4MAPPED(in6)) {
		in_addr_t in4;

		IN6_V4MAPPED_TO_IPADDR(in6, in4);
		return (find_rhc_v4(&in4));
	}

	for (i = (TSOL_MASK_TABLE_SIZE_V6 - 1); i >= 0; i--) {
		/* Skip prefix lengths with no loaded entries. */
		if ((tnrhc_table_v6[i]) == NULL)
			continue;

		tsol_plen_to_mask_v6(i, &tmpmask);
		tnrhc_hash = &tnrhc_table_v6[i][
		    TSOL_ADDR_MASK_HASH_V6(*in6, tmpmask, TNRHC_SIZE)];

		mutex_enter(&tnrhc_hash->tnrh_lock);
		for (rh = tnrhc_hash->tnrh_list; rh != NULL;
		    rh = rh->rhc_next) {
			if ((rh->rhc_host.ta_family == AF_INET6) &&
			    V6_MASK_EQ_2(rh->rhc_host.ta_addr_v6, tmpmask,
			    *in6)) {
				TNRHC_HOLD(rh);
				mutex_exit(&tnrhc_hash->tnrh_lock);
				return (rh);
			}
		}
		mutex_exit(&tnrhc_hash->tnrh_lock);
	}

	return (NULL);
}
1094 
/*
 * Find the label template controlling the given address.  "addr" points
 * to an in_addr_t when version is IPV4_VERSION, otherwise to an
 * in6_addr_t.  When "staleok" is false, an invalidated template is
 * replaced by a fresh lookup of the same template name.  On success the
 * returned template carries an extra hold that the caller must drop with
 * TPC_RELE(); returns NULL when the address has no remote-host cache
 * entry or the stale template could not be refreshed.
 */
tsol_tpc_t *
find_tpc(const void *addr, uchar_t version, boolean_t staleok)
{
	tsol_tpc_t *tpc;
	tsol_tnrhc_t *rhc;

	if (version == IPV4_VERSION)
		rhc = find_rhc_v4(addr);
	else
		rhc = find_rhc_v6(addr);

	if (rhc != NULL) {
		tpc = rhc->rhc_tpc;
		if (!staleok && tpc->tpc_invalid) {
			/*
			 * This should not happen unless the user deletes
			 * templates without recreating them.  Try to find the
			 * new version of template.  If there is none, then
			 * just give up.
			 *
			 * NOTE(review): tnrhtp_find() presumably returns a
			 * held entry, and rhc->rhc_tpc is swapped here
			 * without rhc_lock held — verify the serialization
			 * assumptions against the template update paths.
			 */
			tpc = tnrhtp_find(tpc->tpc_tp.name, tpc_name_hash);
			if (tpc != NULL) {
				TPC_RELE(rhc->rhc_tpc);
				rhc->rhc_tpc = tpc;
			}
		}
		/* Hold the template for the caller, then drop the rhc ref. */
		if (tpc != NULL)
			TPC_HOLD(tpc);
		TNRHC_RELE(rhc);
		return (tpc);
	}
	DTRACE_PROBE(tx__tndb__l1__findtpc__notemplate);
	return (NULL);
}
1129 
1130 /*
1131  * create an internal template called "_unlab":
1132  *
1133  * _unlab;\
1134  *	host_type = unlabeled;\
1135  *	def_label = ADMIN_LOW[ADMIN_LOW];\
1136  *	min_sl = ADMIN_LOW;\
1137  *	max_sl = ADMIN_HIGH;
1138  */
1139 static void
1140 tsol_create_i_tmpls(void)
1141 {
1142 	tsol_tpent_t rhtpent;
1143 
1144 	bzero(&rhtpent, sizeof (rhtpent));
1145 
1146 	/* create _unlab */
1147 	(void) strcpy(rhtpent.name, "_unlab");
1148 
1149 	rhtpent.host_type = UNLABELED;
1150 	rhtpent.tp_mask_unl = TSOL_MSK_DEF_LABEL | TSOL_MSK_DEF_CL |
1151 	    TSOL_MSK_SL_RANGE_TSOL;
1152 
1153 	rhtpent.tp_gw_sl_range.lower_bound = *label2bslabel(l_admin_low);
1154 	rhtpent.tp_def_label = rhtpent.tp_gw_sl_range.lower_bound;
1155 	rhtpent.tp_gw_sl_range.upper_bound = *label2bslabel(l_admin_high);
1156 	rhtpent.tp_cipso_doi_unl = default_doi;
1157 	tpc_unlab = tnrhtp_create(&rhtpent, KM_SLEEP);
1158 }
1159 
1160 /*
1161  * set up internal host template, called from kernel only.
1162  */
1163 static void
1164 tsol_create_i_tnrh(const tnaddr_t *sa)
1165 {
1166 	tsol_tnrhc_t *rh, *new;
1167 	tnrhc_hash_t *tnrhc_hash;
1168 
1169 	/* Allocate a new entry before taking the lock */
1170 	new = kmem_zalloc(sizeof (*new), KM_SLEEP);
1171 
1172 	tnrhc_hash = (sa->ta_family == AF_INET) ? &tnrhc_table[0][0] :
1173 	    &tnrhc_table_v6[0][0];
1174 
1175 	mutex_enter(&tnrhc_hash->tnrh_lock);
1176 	rh = tnrhc_hash->tnrh_list;
1177 
1178 	if (rh == NULL) {
1179 		/* We're keeping the new entry. */
1180 		rh = new;
1181 		new = NULL;
1182 		rh->rhc_host = *sa;
1183 		mutex_init(&rh->rhc_lock, NULL, MUTEX_DEFAULT, NULL);
1184 		TNRHC_HOLD(rh);
1185 		tnrhc_hash->tnrh_list = rh;
1186 	}
1187 
1188 	/*
1189 	 * Link the entry to internal_unlab
1190 	 */
1191 	if (rh->rhc_tpc != tpc_unlab) {
1192 		if (rh->rhc_tpc != NULL)
1193 			TPC_RELE(rh->rhc_tpc);
1194 		rh->rhc_tpc = tpc_unlab;
1195 		TPC_HOLD(tpc_unlab);
1196 	}
1197 	mutex_exit(&tnrhc_hash->tnrh_lock);
1198 	if (new != NULL)
1199 		kmem_free(new, sizeof (*new));
1200 }
1201 
1202 /*
1203  * Returns 0 if the port is known to be SLP.  Returns next possible port number
1204  * (wrapping through 1) if port is MLP on shared or global.  Administrator
1205  * should not make all ports MLP.  If that's done, then we'll just pretend
1206  * everything is SLP to avoid looping forever.
1207  *
1208  * Note: port is in host byte order.
1209  */
1210 in_port_t
1211 tsol_next_port(zone_t *zone, in_port_t port, int proto, boolean_t upward)
1212 {
1213 	boolean_t loop;
1214 	tsol_mlp_entry_t *tme;
1215 	int newport = port;
1216 
1217 	loop = B_FALSE;
1218 	for (;;) {
1219 		if (zone != NULL && zone->zone_mlps.mlpl_first != NULL) {
1220 			rw_enter(&zone->zone_mlps.mlpl_rwlock, RW_READER);
1221 			for (tme = zone->zone_mlps.mlpl_first; tme != NULL;
1222 			    tme = tme->mlpe_next) {
1223 				if (proto == tme->mlpe_mlp.mlp_ipp &&
1224 				    newport >= tme->mlpe_mlp.mlp_port &&
1225 				    newport <= tme->mlpe_mlp.mlp_port_upper)
1226 					newport = upward ?
1227 					    tme->mlpe_mlp.mlp_port_upper + 1 :
1228 					    tme->mlpe_mlp.mlp_port - 1;
1229 			}
1230 			rw_exit(&zone->zone_mlps.mlpl_rwlock);
1231 		}
1232 		if (shared_mlps.mlpl_first != NULL) {
1233 			rw_enter(&shared_mlps.mlpl_rwlock, RW_READER);
1234 			for (tme = shared_mlps.mlpl_first; tme != NULL;
1235 			    tme = tme->mlpe_next) {
1236 				if (proto == tme->mlpe_mlp.mlp_ipp &&
1237 				    newport >= tme->mlpe_mlp.mlp_port &&
1238 				    newport <= tme->mlpe_mlp.mlp_port_upper)
1239 					newport = upward ?
1240 					    tme->mlpe_mlp.mlp_port_upper + 1 :
1241 					    tme->mlpe_mlp.mlp_port - 1;
1242 			}
1243 			rw_exit(&shared_mlps.mlpl_rwlock);
1244 		}
1245 		if (newport <= 65535 && newport > 0)
1246 			break;
1247 		if (loop)
1248 			return (0);
1249 		loop = B_TRUE;
1250 		newport = upward ? 1 : 65535;
1251 	}
1252 	return (newport == port ? 0 : newport);
1253 }
1254 
1255 /*
1256  * tsol_mlp_port_type will check if the given (zone, proto, port) is a
1257  * multilevel port.  If it is, return the type (shared, private, or both), or
1258  * indicate that it's single-level.
1259  *
1260  * Note: port is given in host byte order, not network byte order.
1261  */
1262 mlp_type_t
1263 tsol_mlp_port_type(zone_t *zone, uchar_t proto, uint16_t port,
1264     mlp_type_t mlptype)
1265 {
1266 	tsol_mlp_entry_t *tme;
1267 
1268 	if (mlptype == mlptBoth || mlptype == mlptPrivate) {
1269 		tme = NULL;
1270 		if (zone->zone_mlps.mlpl_first != NULL) {
1271 			rw_enter(&zone->zone_mlps.mlpl_rwlock, RW_READER);
1272 			for (tme = zone->zone_mlps.mlpl_first; tme != NULL;
1273 			    tme = tme->mlpe_next) {
1274 				if (proto == tme->mlpe_mlp.mlp_ipp &&
1275 				    port >= tme->mlpe_mlp.mlp_port &&
1276 				    port <= tme->mlpe_mlp.mlp_port_upper)
1277 					break;
1278 			}
1279 			rw_exit(&zone->zone_mlps.mlpl_rwlock);
1280 		}
1281 		if (tme == NULL) {
1282 			if (mlptype == mlptBoth)
1283 				mlptype = mlptShared;
1284 			else if (mlptype == mlptPrivate)
1285 				mlptype = mlptSingle;
1286 		}
1287 	}
1288 	if (mlptype == mlptBoth || mlptype == mlptShared) {
1289 		tme = NULL;
1290 		if (shared_mlps.mlpl_first != NULL) {
1291 			rw_enter(&shared_mlps.mlpl_rwlock, RW_READER);
1292 			for (tme = shared_mlps.mlpl_first; tme != NULL;
1293 			    tme = tme->mlpe_next) {
1294 				if (proto == tme->mlpe_mlp.mlp_ipp &&
1295 				    port >= tme->mlpe_mlp.mlp_port &&
1296 				    port <= tme->mlpe_mlp.mlp_port_upper)
1297 					break;
1298 			}
1299 			rw_exit(&shared_mlps.mlpl_rwlock);
1300 		}
1301 		if (tme == NULL) {
1302 			if (mlptype == mlptBoth)
1303 				mlptype = mlptPrivate;
1304 			else if (mlptype == mlptShared)
1305 				mlptype = mlptSingle;
1306 		}
1307 	}
1308 	return (mlptype);
1309 }
1310 
1311 /*
1312  * tsol_mlp_findzone will check if the given (proto, port) is a multilevel port
1313  * on a shared address.  If it is, return the owning zone.
1314  *
1315  * Note: lport is in network byte order, unlike the other MLP functions,
1316  * because the callers of this function are all dealing with packets off the
1317  * wire.
1318  */
1319 zoneid_t
1320 tsol_mlp_findzone(uchar_t proto, uint16_t lport)
1321 {
1322 	tsol_mlp_entry_t *tme;
1323 	zoneid_t zoneid;
1324 	uint16_t port;
1325 
1326 	if (shared_mlps.mlpl_first == NULL)
1327 		return (ALL_ZONES);
1328 	port = ntohs(lport);
1329 	rw_enter(&shared_mlps.mlpl_rwlock, RW_READER);
1330 	for (tme = shared_mlps.mlpl_first; tme != NULL; tme = tme->mlpe_next) {
1331 		if (proto == tme->mlpe_mlp.mlp_ipp &&
1332 		    port >= tme->mlpe_mlp.mlp_port &&
1333 		    port <= tme->mlpe_mlp.mlp_port_upper)
1334 			break;
1335 	}
1336 	zoneid = tme == NULL ? ALL_ZONES : tme->mlpe_zoneid;
1337 	rw_exit(&shared_mlps.mlpl_rwlock);
1338 	return (zoneid);
1339 }
1340 
/*
 * Debug routine: dump a binary label to the console via cmn_err().
 * "name" is a caller-supplied tag printed before the label, which is
 * shown as id:classification followed by the eight 32-bit compartment
 * words (each passed through ntohl() for display).
 */
void
tsol_print_label(const blevel_t *blev, const char *name)
{
	/* View the opaque label through its implementation layout. */
	const _blevel_impl_t *bli = (const _blevel_impl_t *)blev;

	/* We really support only sensitivity labels */
	cmn_err(CE_NOTE, "%s %x:%x:%08x%08x%08x%08x%08x%08x%08x%08x",
	    name, bli->id, LCLASS(bli), ntohl(bli->_comps.c1),
	    ntohl(bli->_comps.c2), ntohl(bli->_comps.c3), ntohl(bli->_comps.c4),
	    ntohl(bli->_comps.c5), ntohl(bli->_comps.c6), ntohl(bli->_comps.c7),
	    ntohl(bli->_comps.c8));
}
1354 
1355 /*
1356  * Name:	labelsys()
1357  *
1358  * Normal:	Routes TSOL syscalls.
1359  *
1360  * Output:	As defined for each TSOL syscall.
1361  *		Returns ENOSYS for unrecognized calls.
1362  */
1363 /* ARGSUSED */
1364 int
1365 labelsys(int op, void *a1, void *a2, void *a3, void *a4, void *a5)
1366 {
1367 	switch (op) {
1368 	case TSOL_SYSLABELING:
1369 		return (sys_labeling);
1370 	case TSOL_TNRH:
1371 		return (tnrh((int)(uintptr_t)a1, a2));
1372 	case TSOL_TNRHTP:
1373 		return (tnrhtp((int)(uintptr_t)a1, a2));
1374 	case TSOL_TNMLP:
1375 		return (tnmlp((int)(uintptr_t)a1, a2));
1376 	case TSOL_GETLABEL:
1377 		return (getlabel((char *)a1, (bslabel_t *)a2));
1378 	case TSOL_FGETLABEL:
1379 		return (fgetlabel((int)(uintptr_t)a1, (bslabel_t *)a2));
1380 	default:
1381 		return (set_errno(ENOSYS));
1382 	}
1383 	/* NOTREACHED */
1384 }
1385