xref: /illumos-gate/usr/src/uts/common/os/labelsys.c (revision 2d6eb4a5e0a47d30189497241345dc5466bb68ab)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <sys/systm.h>
27 #include <sys/types.h>
28 #include <sys/stream.h>
29 #include <sys/kmem.h>
30 #include <sys/strsubr.h>
31 #include <sys/cmn_err.h>
32 #include <sys/debug.h>
33 #include <sys/param.h>
34 #include <sys/model.h>
35 #include <sys/errno.h>
36 #include <sys/modhash.h>
37 
38 #include <sys/policy.h>
39 #include <sys/tsol/label.h>
40 #include <sys/tsol/tsyscall.h>
41 #include <sys/tsol/tndb.h>
42 #include <sys/tsol/tnet.h>
43 #include <sys/disp.h>
44 
45 #include <inet/ip.h>
46 #include <inet/ip6.h>
47 #include <sys/sdt.h>
48 
49 static mod_hash_t *tpc_name_hash;	/* hash of cache entries by name */
50 static kmutex_t tpc_lock;
51 
52 static tsol_tpc_t *tpc_unlab;
53 
54 /*
55  * tnrhc_table and tnrhc_table_v6 are similar to the IP forwarding tables
56  * in organization and search. The tnrhc_table[_v6] is an array of 33/129
57  * pointers to the 33/129 tnrhc tables indexed by the prefix length.
58  * A largest prefix match search is done by find_rhc and it walks the
59  * tables from the most specific to the least specific table. Table 0
60  * corresponds to the single entry for 0.0.0.0/0 or ::0/0.
61  */
62 tnrhc_hash_t *tnrhc_table[TSOL_MASK_TABLE_SIZE];
63 tnrhc_hash_t *tnrhc_table_v6[TSOL_MASK_TABLE_SIZE_V6];
64 kmutex_t tnrhc_g_lock;
65 
66 static void tsol_create_i_tmpls(void);
67 
68 static void tsol_create_i_tnrh(const tnaddr_t *);
69 
70 /* List of MLPs on valid on shared addresses */
71 static tsol_mlp_list_t shared_mlps;
72 
73 /*
74  * Convert length for a mask to the mask.
75  */
76 static ipaddr_t
tsol_plen_to_mask(uint_t masklen)77 tsol_plen_to_mask(uint_t masklen)
78 {
79 	return (masklen == 0 ? 0 : htonl(IP_HOST_MASK << (IP_ABITS - masklen)));
80 }
81 
82 /*
83  * Convert a prefix length to the mask for that prefix.
84  * Returns the argument bitmask.
85  */
86 static void
tsol_plen_to_mask_v6(uint_t plen,in6_addr_t * bitmask)87 tsol_plen_to_mask_v6(uint_t plen, in6_addr_t *bitmask)
88 {
89 	uint32_t *ptr;
90 
91 	ASSERT(plen <= IPV6_ABITS);
92 
93 	ptr = (uint32_t *)bitmask;
94 	while (plen >= 32) {
95 		*ptr++ = 0xffffffffU;
96 		plen -= 32;
97 	}
98 	if (plen > 0)
99 		*ptr++ = htonl(0xffffffff << (32 - plen));
100 	while (ptr < (uint32_t *)(bitmask + 1))
101 		*ptr++ = 0;
102 }
103 
104 boolean_t
tnrhc_init_table(tnrhc_hash_t * table[],short prefix_len,int kmflag)105 tnrhc_init_table(tnrhc_hash_t *table[], short prefix_len, int kmflag)
106 {
107 	int	i;
108 
109 	mutex_enter(&tnrhc_g_lock);
110 
111 	if (table[prefix_len] == NULL) {
112 		table[prefix_len] = (tnrhc_hash_t *)
113 		    kmem_zalloc(TNRHC_SIZE * sizeof (tnrhc_hash_t), kmflag);
114 		if (table[prefix_len] == NULL) {
115 			mutex_exit(&tnrhc_g_lock);
116 			return (B_FALSE);
117 		}
118 		for (i = 0; i < TNRHC_SIZE; i++) {
119 			mutex_init(&table[prefix_len][i].tnrh_lock,
120 			    NULL, MUTEX_DEFAULT, 0);
121 		}
122 	}
123 	mutex_exit(&tnrhc_g_lock);
124 	return (B_TRUE);
125 }
126 
/*
 * One-time initialization of the trusted-networking caches: the template
 * name hash, its lock, the global remote-host table lock, the "_unlab"
 * internal template, the 0.0.0.0/0 and ::0/0 wildcard host entries, and
 * the shared-address MLP list lock.
 */
void
tcache_init(void)
{
	tnaddr_t address;

	/*
	 * Note: unable to use mod_hash_create_strhash here, since it's
	 * asymmetric.  It assumes that the user has allocated exactly
	 * strlen(key) + 1 bytes for the key when inserted, and attempts to
	 * kmem_free that memory on a delete.
	 */
	tpc_name_hash = mod_hash_create_extended("tnrhtpc_by_name", 256,
	    mod_hash_null_keydtor,  mod_hash_null_valdtor, mod_hash_bystr,
	    NULL, mod_hash_strkey_cmp, KM_SLEEP);
	mutex_init(&tpc_lock, NULL, MUTEX_DEFAULT, NULL);

	mutex_init(&tnrhc_g_lock, NULL, MUTEX_DEFAULT, NULL);

	/* label_init always called before tcache_init */
	ASSERT(l_admin_low != NULL && l_admin_high != NULL);

	/* Initialize the zeroth table prior to loading the 0.0.0.0 entry */
	(void) tnrhc_init_table(tnrhc_table, 0, KM_SLEEP);
	(void) tnrhc_init_table(tnrhc_table_v6, 0, KM_SLEEP);
	/*
	 * create an internal host template called "_unlab"
	 */
	tsol_create_i_tmpls();

	/*
	 * create a host entry, 0.0.0.0 = _unlab
	 */
	bzero(&address, sizeof (tnaddr_t));
	address.ta_family = AF_INET;
	tsol_create_i_tnrh(&address);

	/*
	 * create a host entry, ::0 = _unlab
	 */
	address.ta_family = AF_INET6;
	tsol_create_i_tnrh(&address);

	rw_init(&shared_mlps.mlpl_rwlock, NULL, RW_DEFAULT, NULL);
}
171 
/*
 * Called only by the TNRHC_RELE macro when the refcount goes to zero.
 *
 * Entered with tnrhc->rhc_lock held (TNRHC_RELE drops the last reference
 * under that lock); this function releases and then destroys the lock
 * before freeing the entry.  The entry must already be unlinked
 * (rhc_next == NULL) and marked invalid.
 */
void
tnrhc_free(tsol_tnrhc_t *tnrhc)
{
	/*
	 * We assert rhc_invalid here to make sure that no new thread could
	 * possibly end up finding this entry.  If it could, then the
	 * mutex_destroy would panic.
	 */
	DTRACE_PROBE1(tx__tndb__l3__tnrhcfree, tsol_tnrhc_t *, tnrhc);
	ASSERT(tnrhc->rhc_next == NULL && tnrhc->rhc_invalid);
	mutex_exit(&tnrhc->rhc_lock);
	mutex_destroy(&tnrhc->rhc_lock);
	/* Drop the template reference the entry has held since tnrh_load. */
	if (tnrhc->rhc_tpc != NULL)
		TPC_RELE(tnrhc->rhc_tpc);
	kmem_free(tnrhc, sizeof (*tnrhc));
}
189 
/*
 * Called only by the TPC_RELE macro when the refcount goes to zero.
 *
 * Entered with tpc->tpc_lock held (the last reference is dropped under
 * that lock); releases and destroys the lock, then frees the template.
 * The template must already be marked invalid so no lookup can find it.
 */
void
tpc_free(tsol_tpc_t *tpc)
{
	DTRACE_PROBE1(tx__tndb__l3__tpcfree, tsol_tpc_t *, tpc);
	ASSERT(tpc->tpc_invalid);
	mutex_exit(&tpc->tpc_lock);
	mutex_destroy(&tpc->tpc_lock);
	kmem_free(tpc, sizeof (*tpc));
}
200 
201 /*
202  * Find and hold a reference to a template entry by name.  Ignores entries that
203  * are being deleted.
204  */
205 static tsol_tpc_t *
tnrhtp_find(const char * name,mod_hash_t * hash)206 tnrhtp_find(const char *name, mod_hash_t *hash)
207 {
208 	mod_hash_val_t hv;
209 	tsol_tpc_t *tpc = NULL;
210 
211 	mutex_enter(&tpc_lock);
212 	if (mod_hash_find(hash, (mod_hash_key_t)name, &hv) == 0) {
213 		tpc = (tsol_tpc_t *)hv;
214 		if (tpc->tpc_invalid)
215 			tpc = NULL;
216 		else
217 			TPC_HOLD(tpc);
218 	}
219 	mutex_exit(&tpc_lock);
220 	return (tpc);
221 }
222 
/*
 * Remove the remote-host cache entry whose address and prefix match
 * rhent.  Returns 0 on success, ENOENT if no matching entry exists,
 * EINVAL for a bad prefix, or EAFNOSUPPORT for an unknown family.
 */
static int
tnrh_delete(const tsol_rhent_t *rhent)
{
	tsol_tnrhc_t *current;
	tsol_tnrhc_t **prevp;
	ipaddr_t tmpmask;
	in6_addr_t tmpmask_v6;
	tnrhc_hash_t *tnrhc_hash;

	/*
	 * Select the hash bucket: index first by prefix length, then by a
	 * hash of the address masked to that prefix.
	 */
	if (rhent->rh_address.ta_family == AF_INET) {
		if (rhent->rh_prefix < 0 || rhent->rh_prefix > IP_ABITS)
			return (EINVAL);
		if (tnrhc_table[rhent->rh_prefix] == NULL)
			return (ENOENT);
		tmpmask = tsol_plen_to_mask(rhent->rh_prefix);
		tnrhc_hash = &tnrhc_table[rhent->rh_prefix][
		    TSOL_ADDR_HASH(rhent->rh_address.ta_addr_v4.s_addr &
		    tmpmask, TNRHC_SIZE)];
	} else if (rhent->rh_address.ta_family == AF_INET6) {
		if (rhent->rh_prefix < 0 || rhent->rh_prefix > IPV6_ABITS)
			return (EINVAL);
		if (tnrhc_table_v6[rhent->rh_prefix] == NULL)
			return (ENOENT);
		tsol_plen_to_mask_v6(rhent->rh_prefix, &tmpmask_v6);
		tnrhc_hash = &tnrhc_table_v6[rhent->rh_prefix][
		    TSOL_ADDR_MASK_HASH_V6(rhent->rh_address.ta_addr_v6,
		    tmpmask_v6, TNRHC_SIZE)];
	} else {
		return (EAFNOSUPPORT);
	}

	/* search for existing entry */
	mutex_enter(&tnrhc_hash->tnrh_lock);
	prevp = &tnrhc_hash->tnrh_list;
	while ((current = *prevp) != NULL) {
		if (TNADDR_EQ(&rhent->rh_address, &current->rhc_host))
			break;
		prevp = &current->rhc_next;
	}

	if (current != NULL) {
		DTRACE_PROBE(tx__tndb__l2__tnrhdelete_existingrhentry);
		/*
		 * Unlink from the bucket, then mark invalid under the
		 * entry's own lock so concurrent holders see the change;
		 * TNRHC_RELE drops the hash table's reference and frees
		 * the entry once the last holder lets go.
		 */
		*prevp = current->rhc_next;
		mutex_enter(&current->rhc_lock);
		current->rhc_next = NULL;
		current->rhc_invalid = 1;
		mutex_exit(&current->rhc_lock);
		TNRHC_RELE(current);
	}
	mutex_exit(&tnrhc_hash->tnrh_lock);
	return (current == NULL ? ENOENT : 0);
}
275 
/*
 * Flush all remote host entries from the database.
 *
 * Note that the htable arrays themselves do not have reference counters,
 * so, unlike the remote host entries, they cannot be freed.
 *
 * htable is an array of nbits bucket-array pointers indexed by prefix
 * length; slots never populated by tnrhc_init_table are skipped.
 */
static void
flush_rh_table(tnrhc_hash_t **htable, int nbits)
{
	tnrhc_hash_t *hent, *hend;
	tsol_tnrhc_t *rhc, *rhnext;

	while (--nbits >= 0) {
		if ((hent = htable[nbits]) == NULL)
			continue;
		hend = hent + TNRHC_SIZE;
		while (hent < hend) {
			/*
			 * List walkers hold this lock during the walk.  It
			 * protects tnrh_list and rhc_next.
			 */
			mutex_enter(&hent->tnrh_lock);
			rhnext = hent->tnrh_list;
			hent->tnrh_list = NULL;
			mutex_exit(&hent->tnrh_lock);
			/*
			 * There may still be users of the rhcs at this point,
			 * but not of the list or its next pointer.  Thus, the
			 * only thing that would need to be done under a lock
			 * is setting the invalid bit, but that's atomic
			 * anyway, so no locks needed here.
			 */
			while ((rhc = rhnext) != NULL) {
				rhnext = rhc->rhc_next;
				rhc->rhc_next = NULL;
				rhc->rhc_invalid = 1;
				TNRHC_RELE(rhc);
			}
			hent++;
		}
	}
}
318 
/*
 * Load a remote host entry into kernel cache.  Create a new one if a matching
 * entry isn't found, otherwise replace the contents of the previous one by
 * deleting it and recreating it.  (Delete and recreate is used to avoid
 * allowing other threads to see an unstable data structure.)
 *
 * A "matching" entry is the one whose address matches that of the one
 * being loaded.
 *
 * Return 0 for success, error code for failure.
 */
static int
tnrh_hash_add(tsol_tnrhc_t *new, short prefix)
{
	tsol_tnrhc_t **rhp;
	tsol_tnrhc_t *rh;
	ipaddr_t tmpmask;
	in6_addr_t tmpmask_v6;
	tnrhc_hash_t *tnrhc_hash;

	/*
	 * Find the existing entry, if any, leaving the hash locked.
	 * Each address-family branch below exits with tnrhc_hash->tnrh_lock
	 * held and rh pointing at the matching entry (or NULL).
	 */
	if (new->rhc_host.ta_family == AF_INET) {
		if (prefix < 0 || prefix > IP_ABITS)
			return (EINVAL);
		/* Lazily create the per-prefix bucket array on first use. */
		if (tnrhc_table[prefix] == NULL &&
		    !tnrhc_init_table(tnrhc_table, prefix,
		    KM_NOSLEEP))
			return (ENOMEM);
		tmpmask = tsol_plen_to_mask(prefix);
		tnrhc_hash = &tnrhc_table[prefix][
		    TSOL_ADDR_HASH(new->rhc_host.ta_addr_v4.s_addr &
		    tmpmask, TNRHC_SIZE)];
		mutex_enter(&tnrhc_hash->tnrh_lock);
		for (rhp = &tnrhc_hash->tnrh_list; (rh = *rhp) != NULL;
		    rhp = &rh->rhc_next) {
			ASSERT(rh->rhc_host.ta_family == AF_INET);
			/* Match on the address bits covered by the prefix. */
			if (((rh->rhc_host.ta_addr_v4.s_addr ^
			    new->rhc_host.ta_addr_v4.s_addr) & tmpmask) ==
			    0)
				break;
		}
	} else if (new->rhc_host.ta_family == AF_INET6) {
		if (prefix < 0 || prefix > IPV6_ABITS)
			return (EINVAL);
		if (tnrhc_table_v6[prefix] == NULL &&
		    !tnrhc_init_table(tnrhc_table_v6, prefix,
		    KM_NOSLEEP))
			return (ENOMEM);
		tsol_plen_to_mask_v6(prefix, &tmpmask_v6);
		tnrhc_hash = &tnrhc_table_v6[prefix][
		    TSOL_ADDR_MASK_HASH_V6(new->rhc_host.ta_addr_v6,
		    tmpmask_v6, TNRHC_SIZE)];
		mutex_enter(&tnrhc_hash->tnrh_lock);
		for (rhp = &tnrhc_hash->tnrh_list; (rh = *rhp) != NULL;
		    rhp = &rh->rhc_next) {
			ASSERT(rh->rhc_host.ta_family == AF_INET6);
			if (V6_MASK_EQ_2(rh->rhc_host.ta_addr_v6, tmpmask_v6,
			    new->rhc_host.ta_addr_v6))
				break;
		}
	} else {
		return (EAFNOSUPPORT);
	}

	/* Clobber the old remote host entry. */
	if (rh != NULL) {
		ASSERT(!rh->rhc_invalid);
		rh->rhc_invalid = 1;
		*rhp = rh->rhc_next;
		rh->rhc_next = NULL;
		DTRACE_PROBE1(tx__tndb__l2__tnrhhashadd__invalidaterh,
		    tsol_tnrhc_t *, rh);
		TNRHC_RELE(rh);
	}

	/* Insert at the head; this HOLD is the hash table's reference. */
	TNRHC_HOLD(new);
	new->rhc_next = tnrhc_hash->tnrh_list;
	tnrhc_hash->tnrh_list = new;
	DTRACE_PROBE1(tx__tndb__l2__tnrhhashadd__addedrh, tsol_tnrhc_t *, new);
	mutex_exit(&tnrhc_hash->tnrh_lock);

	return (0);
}
402 
/*
 * Load a remote host entry into kernel cache.
 *
 * Looks up the named template, allocates and initializes a new cache
 * entry bound to that template, and hands it to tnrh_hash_add.
 *
 * Return 0 for success, error code for failure (EINVAL for an unknown
 * template, ENOMEM on allocation failure, or tnrh_hash_add's error).
 */
int
tnrh_load(const tsol_rhent_t *rhent)
{
	tsol_tnrhc_t *new;
	tsol_tpc_t *tpc;
	int status;

	/* Find and bump the reference count on the named template */
	if ((tpc = tnrhtp_find(rhent->rh_template, tpc_name_hash)) == NULL) {
		return (EINVAL);
	}
	ASSERT(tpc->tpc_tp.host_type == UNLABELED ||
	    tpc->tpc_tp.host_type == SUN_CIPSO);

	if ((new = kmem_zalloc(sizeof (*new), KM_NOSLEEP)) == NULL) {
		TPC_RELE(tpc);
		return (ENOMEM);
	}

	/* Initialize the new entry. */
	mutex_init(&new->rhc_lock, NULL, MUTEX_DEFAULT, NULL);
	new->rhc_host = rhent->rh_address;

	/* The rhc now owns this tpc reference, so no TPC_RELE past here */
	new->rhc_tpc = tpc;

	/*
	 * tnrh_hash_add handles the tnrh entry ref count for hash
	 * table inclusion. The ref count is incremented and decremented
	 * here to trigger deletion of the new hash table entry in the
	 * event that tnrh_hash_add fails.
	 */
	TNRHC_HOLD(new);
	status = tnrh_hash_add(new, rhent->rh_prefix);
	TNRHC_RELE(new);

	return (status);
}
446 
447 static int
tnrh_get(tsol_rhent_t * rhent)448 tnrh_get(tsol_rhent_t *rhent)
449 {
450 	tsol_tpc_t *tpc;
451 
452 	switch (rhent->rh_address.ta_family) {
453 	case AF_INET:
454 		tpc = find_tpc(&rhent->rh_address.ta_addr_v4, IPV4_VERSION,
455 		    B_TRUE);
456 		break;
457 
458 	case AF_INET6:
459 		tpc = find_tpc(&rhent->rh_address.ta_addr_v6, IPV6_VERSION,
460 		    B_TRUE);
461 		break;
462 
463 	default:
464 		return (EINVAL);
465 	}
466 	if (tpc == NULL)
467 		return (ENOENT);
468 
469 	DTRACE_PROBE2(tx__tndb__l4__tnrhget__foundtpc, tsol_rhent_t *,
470 	    rhent, tsol_tpc_t *, tpc);
471 	bcopy(tpc->tpc_tp.name, rhent->rh_template,
472 	    sizeof (rhent->rh_template));
473 	TPC_RELE(tpc);
474 	return (0);
475 }
476 
477 static boolean_t
template_name_ok(const char * name)478 template_name_ok(const char *name)
479 {
480 	const char *name_end = name + TNTNAMSIZ;
481 
482 	while (name < name_end) {
483 		if (*name == '\0')
484 			break;
485 		name++;
486 	}
487 	return (name < name_end);
488 }
489 
/*
 * Handler for the remote-host database (tnrhdb) operations: load, get,
 * delete a single entry, or flush the whole cache.  All commands except
 * TNDB_GET require network-configuration privilege.  Returns 0 on
 * success; on failure sets errno via set_errno and returns the error.
 */
static int
tnrh(int cmd, void *buf)
{
	int retv;
	tsol_rhent_t rhent;

	/* Make sure user has sufficient privilege */
	if (cmd != TNDB_GET &&
	    (retv = secpolicy_net_config(CRED(), B_FALSE)) != 0)
		return (set_errno(retv));

	/*
	 * Get arguments (TNDB_FLUSH takes no argument structure)
	 */
	if (cmd != TNDB_FLUSH &&
	    copyin(buf, &rhent, sizeof (rhent)) != 0) {
		DTRACE_PROBE(tx__tndb__l0__tnrhdelete__copyin);
		return (set_errno(EFAULT));
	}

	switch (cmd) {
	case TNDB_LOAD:
		DTRACE_PROBE(tx__tndb__l2__tnrhdelete__tndbload);
		/* Reject unterminated template names before lookup. */
		if (!template_name_ok(rhent.rh_template)) {
			retv = EINVAL;
		} else {
			retv = tnrh_load(&rhent);
		}
		break;

	case TNDB_DELETE:
		DTRACE_PROBE(tx__tndb__l2__tnrhdelete__tndbdelete);
		retv = tnrh_delete(&rhent);
		break;

	case TNDB_GET:
		DTRACE_PROBE(tx__tndb__l4__tnrhdelete__tndbget);
		if (!template_name_ok(rhent.rh_template)) {
			retv = EINVAL;
			break;
		}

		retv = tnrh_get(&rhent);
		if (retv != 0)
			break;

		/*
		 * Copy out result
		 */
		if (copyout(&rhent, buf, sizeof (rhent)) != 0) {
			DTRACE_PROBE(tx__tndb__l0__tnrhdelete__copyout);
			retv = EFAULT;
		}
		break;

	case TNDB_FLUSH:
		DTRACE_PROBE(tx__tndb__l2__tnrhdelete__flush);
		/* Drop every cached entry, both address families. */
		flush_rh_table(tnrhc_table, TSOL_MASK_TABLE_SIZE);
		flush_rh_table(tnrhc_table_v6, TSOL_MASK_TABLE_SIZE_V6);
		break;

	default:
		DTRACE_PROBE1(tx__tndb__l0__tnrhdelete__unknowncmd,
		    int, cmd);
		retv = EOPNOTSUPP;
		break;
	}

	if (retv != 0)
		return (set_errno(retv));
	else
		return (retv);
}
563 
/*
 * Create a template cache entry from the supplied tpent and insert it
 * into the name hash.  If an entry with the same name already exists it
 * is invalidated, removed, and released first, so concurrent lookups
 * never see a partially-updated template.  Returns the new entry (whose
 * single reference is held on behalf of the hash table) or NULL on
 * allocation failure.
 */
static tsol_tpc_t *
tnrhtp_create(const tsol_tpent_t *tpent, int kmflags)
{
	tsol_tpc_t *tpc;
	mod_hash_val_t hv;

	/*
	 * We intentionally allocate a new entry before taking the lock on the
	 * entire database.
	 */
	if ((tpc = kmem_zalloc(sizeof (*tpc), kmflags)) == NULL)
		return (NULL);

	mutex_enter(&tpc_lock);
	/* Replace-by-name: retire any existing entry with this name. */
	if (mod_hash_find(tpc_name_hash, (mod_hash_key_t)tpent->name,
	    &hv) == 0) {
		tsol_tpc_t *found_tpc = (tsol_tpc_t *)hv;

		found_tpc->tpc_invalid = 1;
		(void) mod_hash_destroy(tpc_name_hash,
		    (mod_hash_key_t)tpent->name);
		TPC_RELE(found_tpc);
	}

	mutex_init(&tpc->tpc_lock, NULL, MUTEX_DEFAULT, NULL);
	/* tsol_tpent_t is the same on LP64 and ILP32 */
	bcopy(tpent, &tpc->tpc_tp, sizeof (tpc->tpc_tp));
	(void) mod_hash_insert(tpc_name_hash, (mod_hash_key_t)tpc->tpc_tp.name,
	    (mod_hash_val_t)tpc);
	/* This hold is the hash table's reference to the new entry. */
	TPC_HOLD(tpc);
	mutex_exit(&tpc_lock);

	return (tpc);
}
598 
599 static int
tnrhtp_delete(const char * tname)600 tnrhtp_delete(const char *tname)
601 {
602 	tsol_tpc_t *tpc;
603 	mod_hash_val_t hv;
604 	int retv = ENOENT;
605 
606 	mutex_enter(&tpc_lock);
607 	if (mod_hash_find(tpc_name_hash, (mod_hash_key_t)tname, &hv) == 0) {
608 		tpc = (tsol_tpc_t *)hv;
609 		ASSERT(!tpc->tpc_invalid);
610 		tpc->tpc_invalid = 1;
611 		(void) mod_hash_destroy(tpc_name_hash,
612 		    (mod_hash_key_t)tpc->tpc_tp.name);
613 		TPC_RELE(tpc);
614 		retv = 0;
615 	}
616 	mutex_exit(&tpc_lock);
617 	return (retv);
618 }
619 
/*
 * mod_hash_walk callback used by tnrhtp_flush: mark each template invalid
 * and drop the hash table's reference to it.  Always continues the walk;
 * the hash itself is cleared afterward by the caller.
 */
/* ARGSUSED */
static uint_t
tpc_delete(mod_hash_key_t key, mod_hash_val_t *val, void *arg)
{
	tsol_tpc_t *tpc = (tsol_tpc_t *)val;

	ASSERT(!tpc->tpc_invalid);
	tpc->tpc_invalid = 1;
	TPC_RELE(tpc);
	return (MH_WALK_CONTINUE);
}
631 
/*
 * Remove every template from the name hash: invalidate and release each
 * entry via the tpc_delete walker, then clear the hash table.  Templates
 * still referenced elsewhere persist until their last TPC_RELE.
 */
static void
tnrhtp_flush(void)
{
	mutex_enter(&tpc_lock);
	mod_hash_walk(tpc_name_hash, tpc_delete, NULL);
	mod_hash_clear(tpc_name_hash);
	mutex_exit(&tpc_lock);
}
640 
/*
 * Handler for the template database (tnrhtp) operations: load, get,
 * delete a template, or flush them all.  All commands except TNDB_GET
 * require network-configuration privilege.  Returns 0 on success; on
 * failure sets errno via set_errno and returns the error.
 */
static int
tnrhtp(int cmd, void *buf)
{
	int retv;
	int type;
	tsol_tpent_t rhtpent;
	tsol_tpc_t *tpc;

	/* Make sure user has sufficient privilege */
	if (cmd != TNDB_GET &&
	    (retv = secpolicy_net_config(CRED(), B_FALSE)) != 0)
		return (set_errno(retv));

	/*
	 * Get argument.  Note that tsol_tpent_t is the same on LP64 and ILP32,
	 * so no special handling is required.
	 */
	if (cmd != TNDB_FLUSH) {
		if (copyin(buf, &rhtpent, sizeof (rhtpent)) != 0) {
			DTRACE_PROBE(tx__tndb__l0__tnrhtp__copyin);
			return (set_errno(EFAULT));
		}

		/*
		 * Don't let the user give us a bogus (unterminated) template
		 * name.
		 */
		if (!template_name_ok(rhtpent.name))
			return (set_errno(EINVAL));
	}

	switch (cmd) {
	case TNDB_LOAD:
		DTRACE_PROBE1(tx__tndb__l2__tnrhtp__tndbload, char *,
		    rhtpent.name);
		/* Only unlabeled and CIPSO host types are supported. */
		type = rhtpent.host_type;
		if (type != UNLABELED && type != SUN_CIPSO) {
			retv = EINVAL;
			break;
		}

		if (tnrhtp_create(&rhtpent, KM_NOSLEEP) == NULL)
			retv = ENOMEM;
		else
			retv = 0;
		break;

	case TNDB_GET:
		DTRACE_PROBE1(tx__tndb__l4__tnrhtp__tndbget, char *,
		    rhtpent.name);
		tpc = tnrhtp_find(rhtpent.name, tpc_name_hash);
		if (tpc == NULL) {
			retv = ENOENT;
			break;
		}

		/* Copy out result */
		if (copyout(&tpc->tpc_tp, buf, sizeof (tpc->tpc_tp)) != 0) {
			DTRACE_PROBE(tx__tndb__l0__tnrhtp__copyout);
			retv = EFAULT;
		} else {
			retv = 0;
		}
		/* Drop the hold taken by tnrhtp_find. */
		TPC_RELE(tpc);
		break;

	case TNDB_DELETE:
		DTRACE_PROBE1(tx__tndb__l4__tnrhtp__tndbdelete, char *,
		    rhtpent.name);
		retv = tnrhtp_delete(rhtpent.name);
		break;

	case TNDB_FLUSH:
		DTRACE_PROBE(tx__tndb__l4__tnrhtp__flush);
		tnrhtp_flush();
		retv = 0;
		break;

	default:
		DTRACE_PROBE1(tx__tndb__l0__tnrhtp__unknowncmd, int,
		    cmd);
		retv = EOPNOTSUPP;
		break;
	}

	if (retv != 0)
		return (set_errno(retv));
	else
		return (retv);
}
731 
/*
 * MLP entry ordering logic
 *
 * There are two loops in this routine.  The first loop finds the entry that
 * either logically follows the new entry to be inserted, or is the entry that
 * precedes and overlaps the new entry, or is NULL to mean end-of-list.  This
 * is 'tme.'  The second loop scans ahead from that point to find any overlap
 * on the front or back of this new entry.
 *
 * For the first loop, we can have the following cases in the list (note that
 * the port-portmax range is inclusive):
 *
 *	       port   portmax
 *		+--------+
 * 1: +------+ ................... precedes; skip to next
 * 2:	    +------+ ............. overlaps; stop here if same protocol
 * 3:		+------+ ......... overlaps; stop if same or higher protocol
 * 4:		    +-------+ .... overlaps or succeeds; stop here
 *
 * For the second loop, we can have the following cases (note that we need not
 * care about other protocol entries at this point, because we're only looking
 * for overlap, not an insertion point):
 *
 *	       port   portmax
 *		+--------+
 * 5:	    +------+ ............. overlaps; stop if same protocol
 * 6:		+------+ ......... overlaps; stop if same protocol
 * 7:		    +-------+ .... overlaps; stop if same protocol
 * 8:			   +---+ . follows; search is done
 *
 * In other words, this second search needs to consider only whether the entry
 * has a starting port number that's greater than the end point of the new
 * entry.  All others are overlaps.
 *
 * mlp_add_del either inserts (addflag) or removes a multilevel-port entry
 * for [port, portmax] of the given protocol into the doubly-linked,
 * port-then-protocol ordered list mlpl, on behalf of zoneid.  Returns 0,
 * ENOMEM (allocation failure on add), EEXIST (overlapping entry on add),
 * or ENOENT (no exact match on delete).
 */
static int
mlp_add_del(tsol_mlp_list_t *mlpl, zoneid_t zoneid, uint8_t proto,
    uint16_t port, uint16_t portmax, boolean_t addflag)
{
	int retv;
	tsol_mlp_entry_t *tme, *tme2, *newent;

	/* Allocate before taking the lock; KM_NOSLEEP, so it may fail. */
	if (addflag) {
		if ((newent = kmem_zalloc(sizeof (*newent), KM_NOSLEEP)) ==
		    NULL)
			return (ENOMEM);
	} else {
		newent = NULL;
	}
	rw_enter(&mlpl->mlpl_rwlock, RW_WRITER);

	/*
	 * First loop: find logical insertion point or overlap.  Table is kept
	 * in order of port number first, and then, within that, by protocol
	 * number.
	 */
	for (tme = mlpl->mlpl_first; tme != NULL; tme = tme->mlpe_next) {
		/* logically next (case 4) */
		if (tme->mlpe_mlp.mlp_port > port)
			break;
		/* if this is logically next or overlap, then stop (case 3) */
		if (tme->mlpe_mlp.mlp_port == port &&
		    tme->mlpe_mlp.mlp_ipp >= proto)
			break;
		/* earlier or same port sequence; check for overlap (case 2) */
		if (tme->mlpe_mlp.mlp_ipp == proto &&
		    tme->mlpe_mlp.mlp_port_upper >= port)
			break;
		/* otherwise, loop again (case 1) */
	}

	/* Second loop: scan ahead for overlap */
	for (tme2 = tme; tme2 != NULL; tme2 = tme2->mlpe_next) {
		/* check if entry follows; no overlap (case 8) */
		if (tme2->mlpe_mlp.mlp_port > portmax) {
			tme2 = NULL;
			break;
		}
		/* only exact protocol matches at this point (cases 5-7) */
		if (tme2->mlpe_mlp.mlp_ipp == proto)
			break;
	}

	retv = 0;
	if (addflag) {
		if (tme2 != NULL) {
			/* Same-protocol overlap found; refuse the insert. */
			retv = EEXIST;
		} else {
			/* Splice newent in front of tme (or at the tail). */
			newent->mlpe_zoneid = zoneid;
			newent->mlpe_mlp.mlp_ipp = proto;
			newent->mlpe_mlp.mlp_port = port;
			newent->mlpe_mlp.mlp_port_upper = portmax;
			newent->mlpe_next = tme;
			if (tme == NULL) {
				tme2 = mlpl->mlpl_last;
				mlpl->mlpl_last = newent;
			} else {
				tme2 = tme->mlpe_prev;
				tme->mlpe_prev = newent;
			}
			newent->mlpe_prev = tme2;
			if (tme2 == NULL)
				mlpl->mlpl_first = newent;
			else
				tme2->mlpe_next = newent;
			/* Ownership transferred to the list; don't free. */
			newent = NULL;
		}
	} else {
		/*
		 * Delete requires an exact [port, portmax] match on the
		 * same-protocol entry found by the second loop; an exact
		 * match implies tme2 == tme, so tme is unlinked below and
		 * freed via newent after the lock is dropped.
		 */
		if (tme2 == NULL || tme2->mlpe_mlp.mlp_port != port ||
		    tme2->mlpe_mlp.mlp_port_upper != portmax) {
			retv = ENOENT;
		} else {
			if ((tme2 = tme->mlpe_prev) == NULL)
				mlpl->mlpl_first = tme->mlpe_next;
			else
				tme2->mlpe_next = tme->mlpe_next;
			if ((tme2 = tme->mlpe_next) == NULL)
				mlpl->mlpl_last = tme->mlpe_prev;
			else
				tme2->mlpe_prev = tme->mlpe_prev;
			newent = tme;
		}
	}
	rw_exit(&mlpl->mlpl_rwlock);

	/* Free either the unused new entry or the unlinked old one. */
	if (newent != NULL)
		kmem_free(newent, sizeof (*newent));

	return (retv);
}
861 
/*
 * Add or remove an MLP entry from the database so that the classifier can find
 * it.
 *
 * mlptPrivate updates the zone's own list, mlptShared the global
 * shared-address list, mlptBoth updates both; a failed shared add rolls
 * back the private add so the two lists stay consistent.
 *
 * Note: port number is in host byte order.
 */
int
tsol_mlp_anon(zone_t *zone, mlp_type_t mlptype, uchar_t proto, uint16_t port,
    boolean_t addflag)
{
	int retv = 0;

	if (mlptype == mlptBoth || mlptype == mlptPrivate)
		retv = mlp_add_del(&zone->zone_mlps, zone->zone_id, proto,
		    port, port, addflag);
	/* For delete (!addflag), try the shared list even if the first failed. */
	if ((retv == 0 || !addflag) &&
	    (mlptype == mlptBoth || mlptype == mlptShared)) {
		retv = mlp_add_del(&shared_mlps, zone->zone_id, proto, port,
		    port, addflag);
		/* Shared add failed after private add: undo the private one. */
		if (retv != 0 && addflag)
			(void) mlp_add_del(&zone->zone_mlps, zone->zone_id,
			    proto, port, port, B_FALSE);
	}
	return (retv);
}
887 
/*
 * Remove and free every MLP entry on mlpl that belongs to zoneid, or all
 * entries when zoneid is ALL_ZONES.  Unlinks each matching node from the
 * doubly-linked list in place while walking it.
 */
static void
mlp_flush(tsol_mlp_list_t *mlpl, zoneid_t zoneid)
{
	tsol_mlp_entry_t *tme, *tme2, *tmnext;

	rw_enter(&mlpl->mlpl_rwlock, RW_WRITER);
	for (tme = mlpl->mlpl_first; tme != NULL; tme = tmnext) {
		/* Capture the successor before tme is possibly freed. */
		tmnext = tme->mlpe_next;
		if (zoneid == ALL_ZONES || tme->mlpe_zoneid == zoneid) {
			if ((tme2 = tme->mlpe_prev) == NULL)
				mlpl->mlpl_first = tmnext;
			else
				tme2->mlpe_next = tmnext;
			if (tmnext == NULL)
				mlpl->mlpl_last = tme2;
			else
				tmnext->mlpe_prev = tme2;
			kmem_free(tme, sizeof (*tme));
		}
	}
	rw_exit(&mlpl->mlpl_rwlock);
}
910 
/*
 * Handler for the multilevel-port (MLP) database operations: load, get,
 * delete an entry, or flush.  TSOL_MEF_SHARED selects the global
 * shared-address list; otherwise the target zone's private list is used
 * (the zone is held for the duration and released before returning).
 *
 * Note: user supplies port numbers in host byte order.
 */
static int
tnmlp(int cmd, void *buf)
{
	int retv;
	tsol_mlpent_t tsme;
	zone_t *zone;
	tsol_mlp_list_t *mlpl;
	tsol_mlp_entry_t *tme;

	/* Make sure user has sufficient privilege */
	if (cmd != TNDB_GET &&
	    (retv = secpolicy_net_config(CRED(), B_FALSE)) != 0)
		return (set_errno(retv));

	/*
	 * Get argument.  Note that tsol_mlpent_t is the same on LP64 and
	 * ILP32, so no special handling is required.
	 */
	if (copyin(buf, &tsme, sizeof (tsme)) != 0) {
		DTRACE_PROBE(tx__tndb__l0__tnmlp__copyin);
		return (set_errno(EFAULT));
	}

	/* MLPs on shared IP addresses */
	if (tsme.tsme_flags & TSOL_MEF_SHARED) {
		zone = NULL;
		mlpl = &shared_mlps;
	} else {
		zone = zone_find_by_id(tsme.tsme_zoneid);
		if (zone == NULL)
			return (set_errno(EINVAL));
		mlpl = &zone->zone_mlps;
	}
	/* A zero upper bound means a single-port MLP. */
	if (tsme.tsme_mlp.mlp_port_upper == 0)
		tsme.tsme_mlp.mlp_port_upper = tsme.tsme_mlp.mlp_port;

	switch (cmd) {
	case TNDB_LOAD:
		DTRACE_PROBE1(tx__tndb__l2__tnmlp__tndbload,
		    tsol_mlpent_t *, &tsme);
		/* Reject zero protocol/port and inverted ranges. */
		if (tsme.tsme_mlp.mlp_ipp == 0 || tsme.tsme_mlp.mlp_port == 0 ||
		    tsme.tsme_mlp.mlp_port > tsme.tsme_mlp.mlp_port_upper) {
			retv = EINVAL;
			break;
		}
		retv = mlp_add_del(mlpl, tsme.tsme_zoneid,
		    tsme.tsme_mlp.mlp_ipp, tsme.tsme_mlp.mlp_port,
		    tsme.tsme_mlp.mlp_port_upper, B_TRUE);
		break;

	case TNDB_GET:
		DTRACE_PROBE1(tx__tndb__l2__tnmlp__tndbget,
		    tsol_mlpent_t *, &tsme);

		/*
		 * Search for the requested element or, failing that, the one
		 * that's logically next in the sequence.
		 */
		rw_enter(&mlpl->mlpl_rwlock, RW_READER);
		for (tme = mlpl->mlpl_first; tme != NULL;
		    tme = tme->mlpe_next) {
			if (tsme.tsme_zoneid != ALL_ZONES &&
			    tme->mlpe_zoneid != tsme.tsme_zoneid)
				continue;
			if (tme->mlpe_mlp.mlp_ipp >= tsme.tsme_mlp.mlp_ipp &&
			    tme->mlpe_mlp.mlp_port == tsme.tsme_mlp.mlp_port)
				break;
			if (tme->mlpe_mlp.mlp_port > tsme.tsme_mlp.mlp_port)
				break;
		}
		if (tme == NULL) {
			retv = ENOENT;
		} else {
			tsme.tsme_zoneid = tme->mlpe_zoneid;
			tsme.tsme_mlp = tme->mlpe_mlp;
			retv = 0;
		}
		rw_exit(&mlpl->mlpl_rwlock);
		break;

	case TNDB_DELETE:
		DTRACE_PROBE1(tx__tndb__l4__tnmlp__tndbdelete,
		    tsol_mlpent_t *, &tsme);
		retv = mlp_add_del(mlpl, tsme.tsme_zoneid,
		    tsme.tsme_mlp.mlp_ipp, tsme.tsme_mlp.mlp_port,
		    tsme.tsme_mlp.mlp_port_upper, B_FALSE);
		break;

	case TNDB_FLUSH:
		DTRACE_PROBE1(tx__tndb__l4__tnmlp__tndbflush,
		    tsol_mlpent_t *, &tsme);
		/* Flush the selected list fully, plus this zone's shared MLPs. */
		mlp_flush(mlpl, ALL_ZONES);
		mlp_flush(&shared_mlps, tsme.tsme_zoneid);
		retv = 0;
		break;

	default:
		DTRACE_PROBE1(tx__tndb__l0__tnmlp__unknowncmd, int,
		    cmd);
		retv = EOPNOTSUPP;
		break;
	}

	if (zone != NULL)
		zone_rele(zone);

	if (cmd == TNDB_GET && retv == 0) {
		/* Copy out result */
		if (copyout(&tsme, buf, sizeof (tsme)) != 0) {
			DTRACE_PROBE(tx__tndb__l0__tnmlp__copyout);
			retv = EFAULT;
		}
	}

	if (retv != 0)
		return (set_errno(retv));
	else
		return (retv);
}
1033 
/*
 * Returns a tnrhc matching the addr address.
 * The returned rhc's refcnt is incremented.
 *
 * addr points at an in_addr_t (IPV4_VERSION) or an in6_addr_t
 * (IPV6_VERSION), as indicated by version.  When staleok is B_TRUE the
 * caller will accept an entry whose template has been invalidated and
 * cannot be refreshed; otherwise such an entry causes a NULL return.
 * The caller must drop the returned hold with TNRHC_RELE().
 */
tsol_tnrhc_t *
find_rhc(const void *addr, uchar_t version, boolean_t staleok)
{
	tsol_tnrhc_t *rh = NULL;
	tsol_tnrhc_t *new;
	tsol_tpc_t *tpc;
	tnrhc_hash_t *tnrhc_hash;
	ipaddr_t tmpmask;
	in_addr_t *in4 = (in_addr_t *)addr;
	in6_addr_t *in6 = (in6_addr_t *)addr;
	in_addr_t tmpin4;
	in6_addr_t tmpmask6;
	int	i;
	int	prefix;

	/*
	 * An IPv4-mapped IPv6 address is really an IPv4 address
	 * in IPv6 format.
	 */
	if (version == IPV6_VERSION &&
	    IN6_IS_ADDR_V4MAPPED(in6)) {
		IN6_V4MAPPED_TO_IPADDR(in6, tmpin4);
		version = IPV4_VERSION;
		in4 = &tmpin4;
	}

	/*
	 * Search the tnrh hash table for each prefix length,
	 * starting at longest prefix length, until a matching
	 * rhc entry is found.
	 */
	if (version == IPV4_VERSION) {
		for (i = (TSOL_MASK_TABLE_SIZE - 1); i >= 0; i--) {

			/* No table allocated for this prefix length. */
			if ((tnrhc_table[i]) == NULL)
				continue;

			tmpmask = tsol_plen_to_mask(i);
			tnrhc_hash = &tnrhc_table[i][
			    TSOL_ADDR_HASH(*in4 & tmpmask, TNRHC_SIZE)];

			mutex_enter(&tnrhc_hash->tnrh_lock);
			for (rh = tnrhc_hash->tnrh_list; rh != NULL;
			    rh = rh->rhc_next) {
				/* Match on the masked (network) part. */
				if ((rh->rhc_host.ta_family == AF_INET) &&
				    ((rh->rhc_host.ta_addr_v4.s_addr &
				    tmpmask) == (*in4 & tmpmask))) {
					/* Remember prefix for re-insertion. */
					prefix = i;
					TNRHC_HOLD(rh);
					break;
				}
			}
			mutex_exit(&tnrhc_hash->tnrh_lock);
			/* Held entry found at this prefix; stop searching. */
			if (rh != NULL)
				break;
		}
		if (rh == NULL)
			DTRACE_PROBE1(tx__tndb__l1__findrhc__norhv4ent,
			    in_addr_t *, in4);
	} else {
		for (i = (TSOL_MASK_TABLE_SIZE_V6 - 1); i >= 0; i--) {
			if ((tnrhc_table_v6[i]) == NULL)
				continue;

			tsol_plen_to_mask_v6(i, &tmpmask6);
			tnrhc_hash = &tnrhc_table_v6[i][
			    TSOL_ADDR_MASK_HASH_V6(*in6, tmpmask6, TNRHC_SIZE)];

			mutex_enter(&tnrhc_hash->tnrh_lock);
			for (rh = tnrhc_hash->tnrh_list; rh != NULL;
			    rh = rh->rhc_next) {
				if ((rh->rhc_host.ta_family == AF_INET6) &&
				    V6_MASK_EQ_2(rh->rhc_host.ta_addr_v6,
				    tmpmask6, *in6)) {
					prefix = i;
					TNRHC_HOLD(rh);
					break;
				}
			}
			mutex_exit(&tnrhc_hash->tnrh_lock);
			if (rh != NULL)
				break;
		}
		if (rh == NULL)
			DTRACE_PROBE1(tx__tndb__l1__findrhc__norhv6ent,
			    in6_addr_t *, in6);
	}

	/*
	 * Does the tnrh entry point to a stale template?
	 * This can happen any time the user deletes or modifies
	 * a template that has existing tnrh entries pointing
	 * to it. Try to find a new version of the template.
	 * If there is no template, then just give up.
	 * If the template exists, reload the tnrh entry.
	 */
	if (rh != NULL && rh->rhc_tpc->tpc_invalid) {
		tpc = tnrhtp_find(rh->rhc_tpc->tpc_tp.name, tpc_name_hash);
		if (tpc == NULL) {
			/* No replacement template available. */
			if (!staleok) {
				DTRACE_PROBE2(tx__tndb__l1__findrhc__staletpc,
				    tsol_tnrhc_t *, rh, tsol_tpc_t *,
				    rh->rhc_tpc);
				TNRHC_RELE(rh);
				rh = NULL;
			}
		} else {
			ASSERT(tpc->tpc_tp.host_type == UNLABELED ||
			    tpc->tpc_tp.host_type == SUN_CIPSO);

			/*
			 * Build a fresh entry bound to the new template
			 * rather than mutating the (shared) stale one.
			 * KM_NOSLEEP because we may be on a hot path;
			 * on failure, drop all holds and fail the lookup.
			 */
			if ((new = kmem_zalloc(sizeof (*new),
			    KM_NOSLEEP)) == NULL) {
				DTRACE_PROBE(tx__tndb__l1__findrhc__nomem);
				TNRHC_RELE(rh);
				TPC_RELE(tpc);
				return (NULL);
			}

			mutex_init(&new->rhc_lock, NULL, MUTEX_DEFAULT, NULL);
			new->rhc_host = rh->rhc_host;
			new->rhc_tpc = tpc;		/* takes tpc hold */
			new->rhc_isbcast = rh->rhc_isbcast;
			new->rhc_local = rh->rhc_local;
			TNRHC_RELE(rh);
			rh = new;

			/*
			 * This function increments the tnrh entry ref count
			 * for the pointer returned to the caller.
			 * tnrh_hash_add increments the tnrh entry ref count
			 * for the pointer in the hash table.
			 */
			TNRHC_HOLD(rh);
			if (tnrh_hash_add(new, prefix) != 0) {
				/* Insertion failed; undo the caller's hold. */
				TNRHC_RELE(rh);
				rh = NULL;
			}
		}
	}
	return (rh);
}
1179 
1180 tsol_tpc_t *
find_tpc(const void * addr,uchar_t version,boolean_t staleok)1181 find_tpc(const void *addr, uchar_t version, boolean_t staleok)
1182 {
1183 	tsol_tpc_t *tpc;
1184 	tsol_tnrhc_t *rhc;
1185 
1186 	if ((rhc = find_rhc(addr, version, staleok)) == NULL)
1187 		return (NULL);
1188 
1189 	tpc = rhc->rhc_tpc;
1190 	TPC_HOLD(tpc);
1191 	TNRHC_RELE(rhc);
1192 	return (tpc);
1193 }
1194 
1195 /*
1196  * create an internal template called "_unlab":
1197  *
1198  * _unlab;\
1199  *	host_type = unlabeled;\
1200  *	def_label = ADMIN_LOW[ADMIN_LOW];\
1201  *	min_sl = ADMIN_LOW;\
1202  *	max_sl = ADMIN_HIGH;
1203  */
1204 static void
tsol_create_i_tmpls(void)1205 tsol_create_i_tmpls(void)
1206 {
1207 	tsol_tpent_t rhtpent;
1208 
1209 	bzero(&rhtpent, sizeof (rhtpent));
1210 
1211 	/* create _unlab */
1212 	(void) strcpy(rhtpent.name, "_unlab");
1213 
1214 	rhtpent.host_type = UNLABELED;
1215 	rhtpent.tp_mask_unl = TSOL_MSK_DEF_LABEL | TSOL_MSK_DEF_CL |
1216 	    TSOL_MSK_SL_RANGE_TSOL;
1217 
1218 	rhtpent.tp_gw_sl_range.lower_bound = *label2bslabel(l_admin_low);
1219 	rhtpent.tp_def_label = rhtpent.tp_gw_sl_range.lower_bound;
1220 	rhtpent.tp_gw_sl_range.upper_bound = *label2bslabel(l_admin_high);
1221 	rhtpent.tp_cipso_doi_unl = default_doi;
1222 	tpc_unlab = tnrhtp_create(&rhtpent, KM_SLEEP);
1223 }
1224 
/*
 * set up internal host template, called from kernel only.
 *
 * Ensures that the catch-all (prefix length 0) remote-host entry for
 * sa's address family exists and points at the internal "_unlab"
 * template (tpc_unlab).  Creates the entry if the 0-prefix bucket is
 * empty; otherwise re-links the existing head entry to tpc_unlab.
 */
static void
tsol_create_i_tnrh(const tnaddr_t *sa)
{
	tsol_tnrhc_t *rh, *new;
	tnrhc_hash_t *tnrhc_hash;

	/* Allocate a new entry before taking the lock */
	new = kmem_zalloc(sizeof (*new), KM_SLEEP);

	/* Prefix length 0, bucket 0: the wildcard entry for the family. */
	tnrhc_hash = (sa->ta_family == AF_INET) ? &tnrhc_table[0][0] :
	    &tnrhc_table_v6[0][0];

	mutex_enter(&tnrhc_hash->tnrh_lock);
	rh = tnrhc_hash->tnrh_list;

	if (rh == NULL) {
		/* We're keeping the new entry. */
		rh = new;
		new = NULL;
		rh->rhc_host = *sa;
		mutex_init(&rh->rhc_lock, NULL, MUTEX_DEFAULT, NULL);
		/* Hold for the hash table's reference. */
		TNRHC_HOLD(rh);
		tnrhc_hash->tnrh_list = rh;
	}

	/*
	 * Link the entry to internal_unlab
	 * (swap template references if it pointed elsewhere).
	 */
	if (rh->rhc_tpc != tpc_unlab) {
		if (rh->rhc_tpc != NULL)
			TPC_RELE(rh->rhc_tpc);
		rh->rhc_tpc = tpc_unlab;
		TPC_HOLD(tpc_unlab);
	}
	mutex_exit(&tnrhc_hash->tnrh_lock);
	/* The preallocated entry went unused; discard it. */
	if (new != NULL)
		kmem_free(new, sizeof (*new));
}
1266 
1267 /*
1268  * Returns 0 if the port is known to be SLP.  Returns next possible port number
1269  * (wrapping through 1) if port is MLP on shared or global.  Administrator
1270  * should not make all ports MLP.  If that's done, then we'll just pretend
1271  * everything is SLP to avoid looping forever.
1272  *
1273  * Note: port is in host byte order.
1274  */
1275 in_port_t
tsol_next_port(zone_t * zone,in_port_t port,int proto,boolean_t upward)1276 tsol_next_port(zone_t *zone, in_port_t port, int proto, boolean_t upward)
1277 {
1278 	boolean_t loop;
1279 	tsol_mlp_entry_t *tme;
1280 	int newport = port;
1281 
1282 	loop = B_FALSE;
1283 	for (;;) {
1284 		if (zone != NULL && zone->zone_mlps.mlpl_first != NULL) {
1285 			rw_enter(&zone->zone_mlps.mlpl_rwlock, RW_READER);
1286 			for (tme = zone->zone_mlps.mlpl_first; tme != NULL;
1287 			    tme = tme->mlpe_next) {
1288 				if (proto == tme->mlpe_mlp.mlp_ipp &&
1289 				    newport >= tme->mlpe_mlp.mlp_port &&
1290 				    newport <= tme->mlpe_mlp.mlp_port_upper)
1291 					newport = upward ?
1292 					    tme->mlpe_mlp.mlp_port_upper + 1 :
1293 					    tme->mlpe_mlp.mlp_port - 1;
1294 			}
1295 			rw_exit(&zone->zone_mlps.mlpl_rwlock);
1296 		}
1297 		if (shared_mlps.mlpl_first != NULL) {
1298 			rw_enter(&shared_mlps.mlpl_rwlock, RW_READER);
1299 			for (tme = shared_mlps.mlpl_first; tme != NULL;
1300 			    tme = tme->mlpe_next) {
1301 				if (proto == tme->mlpe_mlp.mlp_ipp &&
1302 				    newport >= tme->mlpe_mlp.mlp_port &&
1303 				    newport <= tme->mlpe_mlp.mlp_port_upper)
1304 					newport = upward ?
1305 					    tme->mlpe_mlp.mlp_port_upper + 1 :
1306 					    tme->mlpe_mlp.mlp_port - 1;
1307 			}
1308 			rw_exit(&shared_mlps.mlpl_rwlock);
1309 		}
1310 		if (newport <= 65535 && newport > 0)
1311 			break;
1312 		if (loop)
1313 			return (0);
1314 		loop = B_TRUE;
1315 		newport = upward ? 1 : 65535;
1316 	}
1317 	return (newport == port ? 0 : newport);
1318 }
1319 
1320 /*
1321  * tsol_mlp_port_type will check if the given (zone, proto, port) is a
1322  * multilevel port.  If it is, return the type (shared, private, or both), or
1323  * indicate that it's single-level.
1324  *
1325  * Note: port is given in host byte order, not network byte order.
1326  */
1327 mlp_type_t
tsol_mlp_port_type(zone_t * zone,uchar_t proto,uint16_t port,mlp_type_t mlptype)1328 tsol_mlp_port_type(zone_t *zone, uchar_t proto, uint16_t port,
1329     mlp_type_t mlptype)
1330 {
1331 	tsol_mlp_entry_t *tme;
1332 
1333 	if (mlptype == mlptBoth || mlptype == mlptPrivate) {
1334 		tme = NULL;
1335 		if (zone->zone_mlps.mlpl_first != NULL) {
1336 			rw_enter(&zone->zone_mlps.mlpl_rwlock, RW_READER);
1337 			for (tme = zone->zone_mlps.mlpl_first; tme != NULL;
1338 			    tme = tme->mlpe_next) {
1339 				if (proto == tme->mlpe_mlp.mlp_ipp &&
1340 				    port >= tme->mlpe_mlp.mlp_port &&
1341 				    port <= tme->mlpe_mlp.mlp_port_upper)
1342 					break;
1343 			}
1344 			rw_exit(&zone->zone_mlps.mlpl_rwlock);
1345 		}
1346 		if (tme == NULL) {
1347 			if (mlptype == mlptBoth)
1348 				mlptype = mlptShared;
1349 			else if (mlptype == mlptPrivate)
1350 				mlptype = mlptSingle;
1351 		}
1352 	}
1353 	if (mlptype == mlptBoth || mlptype == mlptShared) {
1354 		tme = NULL;
1355 		if (shared_mlps.mlpl_first != NULL) {
1356 			rw_enter(&shared_mlps.mlpl_rwlock, RW_READER);
1357 			for (tme = shared_mlps.mlpl_first; tme != NULL;
1358 			    tme = tme->mlpe_next) {
1359 				if (proto == tme->mlpe_mlp.mlp_ipp &&
1360 				    port >= tme->mlpe_mlp.mlp_port &&
1361 				    port <= tme->mlpe_mlp.mlp_port_upper)
1362 					break;
1363 			}
1364 			rw_exit(&shared_mlps.mlpl_rwlock);
1365 		}
1366 		if (tme == NULL) {
1367 			if (mlptype == mlptBoth)
1368 				mlptype = mlptPrivate;
1369 			else if (mlptype == mlptShared)
1370 				mlptype = mlptSingle;
1371 		}
1372 	}
1373 	return (mlptype);
1374 }
1375 
1376 /*
1377  * tsol_mlp_findzone will check if the given (proto, port) is a multilevel port
1378  * on a shared address.  If it is, return the owning zone.
1379  *
1380  * Note: lport is in network byte order, unlike the other MLP functions,
1381  * because the callers of this function are all dealing with packets off the
1382  * wire.
1383  */
1384 zoneid_t
tsol_mlp_findzone(uchar_t proto,uint16_t lport)1385 tsol_mlp_findzone(uchar_t proto, uint16_t lport)
1386 {
1387 	tsol_mlp_entry_t *tme;
1388 	zoneid_t zoneid;
1389 	uint16_t port;
1390 
1391 	if (shared_mlps.mlpl_first == NULL)
1392 		return (ALL_ZONES);
1393 	port = ntohs(lport);
1394 	rw_enter(&shared_mlps.mlpl_rwlock, RW_READER);
1395 	for (tme = shared_mlps.mlpl_first; tme != NULL; tme = tme->mlpe_next) {
1396 		if (proto == tme->mlpe_mlp.mlp_ipp &&
1397 		    port >= tme->mlpe_mlp.mlp_port &&
1398 		    port <= tme->mlpe_mlp.mlp_port_upper)
1399 			break;
1400 	}
1401 	zoneid = tme == NULL ? ALL_ZONES : tme->mlpe_zoneid;
1402 	rw_exit(&shared_mlps.mlpl_rwlock);
1403 	return (zoneid);
1404 }
1405 
/*
 * Debug routine: log a label's raw fields to the console via cmn_err.
 * Prints the id and classification, followed by the eight 32-bit
 * compartment words in network-to-host order.  NOTE(review): assumes
 * blev actually points at a _blevel_impl_t (sensitivity label layout) —
 * the cast is unchecked; confirm against label_impl definitions.
 */
void
tsol_print_label(const blevel_t *blev, const char *name)
{
	const _blevel_impl_t *bli = (const _blevel_impl_t *)blev;

	/* We really support only sensitivity labels */
	cmn_err(CE_NOTE, "%s %x:%x:%08x%08x%08x%08x%08x%08x%08x%08x",
	    name, bli->id, LCLASS(bli), ntohl(bli->_comps.c1),
	    ntohl(bli->_comps.c2), ntohl(bli->_comps.c3), ntohl(bli->_comps.c4),
	    ntohl(bli->_comps.c5), ntohl(bli->_comps.c6), ntohl(bli->_comps.c7),
	    ntohl(bli->_comps.c8));
}
1419 
1420 /*
1421  * Name:	labelsys()
1422  *
1423  * Normal:	Routes TSOL syscalls.
1424  *
1425  * Output:	As defined for each TSOL syscall.
1426  *		Returns ENOSYS for unrecognized calls.
1427  */
1428 /* ARGSUSED */
1429 int
labelsys(int op,void * a1,void * a2,void * a3,void * a4,void * a5)1430 labelsys(int op, void *a1, void *a2, void *a3, void *a4, void *a5)
1431 {
1432 	switch (op) {
1433 	case TSOL_SYSLABELING:
1434 		return (sys_labeling);
1435 	case TSOL_TNRH:
1436 		return (tnrh((int)(uintptr_t)a1, a2));
1437 	case TSOL_TNRHTP:
1438 		return (tnrhtp((int)(uintptr_t)a1, a2));
1439 	case TSOL_TNMLP:
1440 		return (tnmlp((int)(uintptr_t)a1, a2));
1441 	case TSOL_GETLABEL:
1442 		return (getlabel((char *)a1, (bslabel_t *)a2));
1443 	case TSOL_FGETLABEL:
1444 		return (fgetlabel((int)(uintptr_t)a1, (bslabel_t *)a2));
1445 	default:
1446 		return (set_errno(ENOSYS));
1447 	}
1448 	/* NOTREACHED */
1449 }
1450