xref: /linux/security/apparmor/policy_unpack.c (revision b11e51dd70947107fa4076c6286dce301671afc1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * AppArmor security module
4  *
5  * This file contains AppArmor functions for unpacking policy loaded from
6  * userspace.
7  *
8  * Copyright (C) 1998-2008 Novell/SUSE
9  * Copyright 2009-2010 Canonical Ltd.
10  *
11  * AppArmor uses a serialized binary format for loading policy. To find
12  * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
13  * All policy is validated before it is used.
14  */
15 
16 #include <asm/unaligned.h>
17 #include <kunit/visibility.h>
18 #include <linux/ctype.h>
19 #include <linux/errno.h>
20 #include <linux/zlib.h>
21 
22 #include "include/apparmor.h"
23 #include "include/audit.h"
24 #include "include/cred.h"
25 #include "include/crypto.h"
26 #include "include/match.h"
27 #include "include/path.h"
28 #include "include/policy.h"
29 #include "include/policy_unpack.h"
30 
31 #define K_ABI_MASK 0x3ff
32 #define FORCE_COMPLAIN_FLAG 0x800
33 #define VERSION_LT(X, Y) (((X) & K_ABI_MASK) < ((Y) & K_ABI_MASK))
34 #define VERSION_GT(X, Y) (((X) & K_ABI_MASK) > ((Y) & K_ABI_MASK))
35 
36 #define v5	5	/* base version */
37 #define v6	6	/* per entry policydb mediation check */
38 #define v7	7
39 #define v8	8	/* full network masking */
40 
41 /* audit callback for unpack fields */
42 static void audit_cb(struct audit_buffer *ab, void *va)
43 {
44 	struct common_audit_data *sa = va;
45 
46 	if (aad(sa)->iface.ns) {
47 		audit_log_format(ab, " ns=");
48 		audit_log_untrustedstring(ab, aad(sa)->iface.ns);
49 	}
50 	if (aad(sa)->name) {
51 		audit_log_format(ab, " name=");
52 		audit_log_untrustedstring(ab, aad(sa)->name);
53 	}
54 	if (aad(sa)->iface.pos)
55 		audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
56 }
57 
58 /**
59  * audit_iface - do audit message for policy unpacking/load/replace/remove
60  * @new: profile if it has been allocated (MAY BE NULL)
61  * @ns_name: name of the ns the profile is to be loaded to (MAY BE NULL)
62  * @name: name of the profile being manipulated (MAY BE NULL)
63  * @info: any extra info about the failure (MAY BE NULL)
64  * @e: buffer position info
65  * @error: error code
66  *
67  * Returns: %0 or error
68  */
69 static int audit_iface(struct aa_profile *new, const char *ns_name,
70 		       const char *name, const char *info, struct aa_ext *e,
71 		       int error)
72 {
73 	struct aa_profile *profile = labels_profile(aa_current_raw_label());
74 	DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, NULL);
75 	if (e)
76 		aad(&sa)->iface.pos = e->pos - e->start;
77 	aad(&sa)->iface.ns = ns_name;
78 	if (new)
79 		aad(&sa)->name = new->base.hname;
80 	else
81 		aad(&sa)->name = name;
82 	aad(&sa)->info = info;
83 	aad(&sa)->error = error;
84 
85 	return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
86 }
87 
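/*
 * __aa_loaddata_update - move the revision of raw policy data forward
 *
 * The caller must hold data->ns->lock and pass a revision that is not
 * older than the current one (both enforced by AA_BUG).  The mtimes of
 * the rawdata dir and revision files in apparmorfs are refreshed so the
 * change is visible to userspace.
 */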
88 void __aa_loaddata_update(struct aa_loaddata *data, long revision)
89 {
90 	AA_BUG(!data);
91 	AA_BUG(!data->ns);
92 	AA_BUG(!mutex_is_locked(&data->ns->lock));
93 	AA_BUG(data->revision > revision);
94 
95 	data->revision = revision;
96 	if ((data->dents[AAFS_LOADDATA_REVISION])) {
97 		d_inode(data->dents[AAFS_LOADDATA_DIR])->i_mtime =
98 			current_time(d_inode(data->dents[AAFS_LOADDATA_DIR]));
99 		d_inode(data->dents[AAFS_LOADDATA_REVISION])->i_mtime =
100 			current_time(d_inode(data->dents[AAFS_LOADDATA_REVISION]));
101 	}
102 }
103 
104 bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
105 {
106 	if (l->size != r->size)
107 		return false;
108 	if (l->compressed_size != r->compressed_size)
109 		return false;
110 	if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
111 		return false;
112 	return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
113 }
114 
115 /*
116  * Freeing requires the ns mutex lock, which is NOT safe to take in most
117  * places where put_loaddata is called, so freeing is deferred to a work item
118  */
119 static void do_loaddata_free(struct work_struct *work)
120 {
121 	struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
122 	struct aa_ns *ns = aa_get_ns(d->ns);
123 
124 	if (ns) {
125 		mutex_lock_nested(&ns->lock, ns->level);
126 		__aa_fs_remove_rawdata(d);
127 		mutex_unlock(&ns->lock);
128 		aa_put_ns(ns);
129 	}
130 
131 	kfree_sensitive(d->hash);
132 	kfree_sensitive(d->name);
133 	kvfree(d->data);
134 	kfree_sensitive(d);
135 }
136 
137 void aa_loaddata_kref(struct kref *kref)
138 {
139 	struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);
140 
141 	if (d) {
142 		INIT_WORK(&d->work, do_loaddata_free);
143 		schedule_work(&d->work);
144 	}
145 }
146 
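/*
 * aa_loaddata_alloc - allocate a refcounted container for @size bytes of
 * raw policy.  The data buffer is kvzalloc'd, so large loads may be
 * vmalloc backed.  Returns ERR_PTR(-ENOMEM) on failure; the object is
 * released through its kref (see aa_loaddata_kref() above).
 */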
147 struct aa_loaddata *aa_loaddata_alloc(size_t size)
148 {
149 	struct aa_loaddata *d;
150 
151 	d = kzalloc(sizeof(*d), GFP_KERNEL);
152 	if (d == NULL)
153 		return ERR_PTR(-ENOMEM);
154 	d->data = kvzalloc(size, GFP_KERNEL);
155 	if (!d->data) {
156 		kfree(d);
157 		return ERR_PTR(-ENOMEM);
158 	}
159 	kref_init(&d->count);
160 	INIT_LIST_HEAD(&d->list);
161 
162 	return d;
163 }
164 
165 /* test if read will be in packed data bounds */
166 VISIBLE_IF_KUNIT bool aa_inbounds(struct aa_ext *e, size_t size)
167 {
168 	return (size <= e->end - e->pos);
169 }
170 EXPORT_SYMBOL_IF_KUNIT(aa_inbounds);
171 
172 static void *kvmemdup(const void *src, size_t len)
173 {
174 	void *p = kvmalloc(len, GFP_KERNEL);
175 
176 	if (p)
177 		memcpy(p, src, len);
178 	return p;
179 }
180 
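/*
 * Illustrative sketch of the tagged layout consumed by the helpers below
 * (not a normative description, see Documentation/admin-guide/LSM/apparmor.rst).
 * A named u32 element looks like
 *
 *   [AA_NAME][u16 le len]["version\0"][AA_U32][u32 le value]
 *
 * and an optional struct wrapping an array looks like
 *
 *   [AA_NAME][u16 le len]["xattrs\0"][AA_STRUCT]
 *     [AA_ARRAY][u16 le count] ...count entries... [AA_ARRAYEND]
 *   [AA_STRUCTEND]
 *
 * where each AA_* code is a single byte (enum aa_code).
 */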
181 /**
182  * aa_unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
183  * @e: serialized data read head (NOT NULL)
184  * @chunk: start address for chunk of data (NOT NULL)
185  *
186  * Returns: the size of the chunk found, with the read head at the end of the chunk; 0 on failure, with the read head unchanged.
187  */
188 VISIBLE_IF_KUNIT size_t aa_unpack_u16_chunk(struct aa_ext *e, char **chunk)
189 {
190 	size_t size = 0;
191 	void *pos = e->pos;
192 
193 	if (!aa_inbounds(e, sizeof(u16)))
194 		goto fail;
195 	size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
196 	e->pos += sizeof(__le16);
197 	if (!aa_inbounds(e, size))
198 		goto fail;
199 	*chunk = e->pos;
200 	e->pos += size;
201 	return size;
202 
203 fail:
204 	e->pos = pos;
205 	return 0;
206 }
207 EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u16_chunk);
208 
209 /* unpack control byte */
210 VISIBLE_IF_KUNIT bool aa_unpack_X(struct aa_ext *e, enum aa_code code)
211 {
212 	if (!aa_inbounds(e, 1))
213 		return false;
214 	if (*(u8 *) e->pos != code)
215 		return false;
216 	e->pos++;
217 	return true;
218 }
219 EXPORT_SYMBOL_IF_KUNIT(aa_unpack_X);
220 
221 /**
222  * aa_unpack_nameX - check is the next element is of type X with a name of @name
223  * @e: serialized data extent information  (NOT NULL)
224  * @code: type code
225  * @name: name to match to the serialized element.  (MAYBE NULL)
226  *
227  * check that the next serialized data element is of type X and has a tag
228  * name @name.  If @name is specified then there must be a matching
229  * name element in the stream.  If @name is NULL any name element will be
230  * skipped and only the typecode will be tested.
231  *
232  * Returns: true on success (both type code and name tests match), and the
233  * read head is advanced past the headers.
234  *
235  * Returns false if either match fails; the read head does not move.
236  */
237 VISIBLE_IF_KUNIT bool aa_unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
238 {
239 	/*
240 	 * May need to reset pos if name or type doesn't match
241 	 */
242 	void *pos = e->pos;
243 	/*
244 	 * Check for the presence of a tag name and, if present, its size.
245 	 * The AA_NAME tag value is a u16 sized chunk (size + name data).
246 	 */
247 	if (aa_unpack_X(e, AA_NAME)) {
248 		char *tag = NULL;
249 		size_t size = aa_unpack_u16_chunk(e, &tag);
250 		/* if a name is specified it must match. otherwise skip tag */
251 		if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
252 			goto fail;
253 	} else if (name) {
254 		/* if a name is specified and there is no name tag fail */
255 		goto fail;
256 	}
257 
258 	/* now check if type code matches */
259 	if (aa_unpack_X(e, code))
260 		return true;
261 
262 fail:
263 	e->pos = pos;
264 	return false;
265 }
266 EXPORT_SYMBOL_IF_KUNIT(aa_unpack_nameX);
267 
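/*
 * The typed unpack helpers below share one contract: on success the read
 * head is left just past the element and true (or the size) is returned;
 * on any failure (missing name, wrong code, out of bounds) e->pos is
 * restored to its value on entry and false/0 is returned, which lets
 * callers probe for optional elements.
 */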
268 static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
269 {
270 	void *pos = e->pos;
271 
272 	if (aa_unpack_nameX(e, AA_U8, name)) {
273 		if (!aa_inbounds(e, sizeof(u8)))
274 			goto fail;
275 		if (data)
276 			*data = *((u8 *)e->pos);
277 		e->pos += sizeof(u8);
278 		return true;
279 	}
280 
281 fail:
282 	e->pos = pos;
283 	return false;
284 }
285 
286 VISIBLE_IF_KUNIT bool aa_unpack_u32(struct aa_ext *e, u32 *data, const char *name)
287 {
288 	void *pos = e->pos;
289 
290 	if (aa_unpack_nameX(e, AA_U32, name)) {
291 		if (!aa_inbounds(e, sizeof(u32)))
292 			goto fail;
293 		if (data)
294 			*data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
295 		e->pos += sizeof(u32);
296 		return true;
297 	}
298 
299 fail:
300 	e->pos = pos;
301 	return false;
302 }
303 EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u32);
304 
305 VISIBLE_IF_KUNIT bool aa_unpack_u64(struct aa_ext *e, u64 *data, const char *name)
306 {
307 	void *pos = e->pos;
308 
309 	if (aa_unpack_nameX(e, AA_U64, name)) {
310 		if (!aa_inbounds(e, sizeof(u64)))
311 			goto fail;
312 		if (data)
313 			*data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
314 		e->pos += sizeof(u64);
315 		return true;
316 	}
317 
318 fail:
319 	e->pos = pos;
320 	return false;
321 }
322 EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u64);
323 
324 VISIBLE_IF_KUNIT size_t aa_unpack_array(struct aa_ext *e, const char *name)
325 {
326 	void *pos = e->pos;
327 
328 	if (aa_unpack_nameX(e, AA_ARRAY, name)) {
329 		int size;
330 		if (!aa_inbounds(e, sizeof(u16)))
331 			goto fail;
332 		size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
333 		e->pos += sizeof(u16);
334 		return size;
335 	}
336 
337 fail:
338 	e->pos = pos;
339 	return 0;
340 }
341 EXPORT_SYMBOL_IF_KUNIT(aa_unpack_array);
342 
343 VISIBLE_IF_KUNIT size_t aa_unpack_blob(struct aa_ext *e, char **blob, const char *name)
344 {
345 	void *pos = e->pos;
346 
347 	if (aa_unpack_nameX(e, AA_BLOB, name)) {
348 		u32 size;
349 		if (!aa_inbounds(e, sizeof(u32)))
350 			goto fail;
351 		size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
352 		e->pos += sizeof(u32);
353 		if (aa_inbounds(e, (size_t) size)) {
354 			*blob = e->pos;
355 			e->pos += size;
356 			return size;
357 		}
358 	}
359 
360 fail:
361 	e->pos = pos;
362 	return 0;
363 }
364 EXPORT_SYMBOL_IF_KUNIT(aa_unpack_blob);
365 
366 VISIBLE_IF_KUNIT int aa_unpack_str(struct aa_ext *e, const char **string, const char *name)
367 {
368 	char *src_str;
369 	size_t size = 0;
370 	void *pos = e->pos;
371 	*string = NULL;
372 	if (aa_unpack_nameX(e, AA_STRING, name)) {
373 		size = aa_unpack_u16_chunk(e, &src_str);
374 		if (size) {
375 			/* strings are null terminated, length is size - 1 */
376 			if (src_str[size - 1] != 0)
377 				goto fail;
378 			*string = src_str;
379 
380 			return size;
381 		}
382 	}
383 
384 fail:
385 	e->pos = pos;
386 	return 0;
387 }
388 EXPORT_SYMBOL_IF_KUNIT(aa_unpack_str);
389 
390 VISIBLE_IF_KUNIT int aa_unpack_strdup(struct aa_ext *e, char **string, const char *name)
391 {
392 	const char *tmp;
393 	void *pos = e->pos;
394 	int res = aa_unpack_str(e, &tmp, name);
395 	*string = NULL;
396 
397 	if (!res)
398 		return 0;
399 
400 	*string = kmemdup(tmp, res, GFP_KERNEL);
401 	if (!*string) {
402 		e->pos = pos;
403 		return 0;
404 	}
405 
406 	return res;
407 }
408 EXPORT_SYMBOL_IF_KUNIT(aa_unpack_strdup);
409 
410 
411 /**
412  * unpack_dfa - unpack a file rule dfa
413  * @e: serialized data extent information (NOT NULL)
414  *
415  * Returns: the unpacked dfa, an ERR_PTR on failure, or NULL if no dfa is present
416  */
417 static struct aa_dfa *unpack_dfa(struct aa_ext *e)
418 {
419 	char *blob = NULL;
420 	size_t size;
421 	struct aa_dfa *dfa = NULL;
422 
423 	size = aa_unpack_blob(e, &blob, "aadfa");
424 	if (size) {
425 		/*
426 		 * The dfa is aligned within the blob to 8 bytes
427 		 * from the beginning of the stream; the alignment
428 		 * adjustment is needed by the dfa unpack code
429 		 */
430 		size_t sz = blob - (char *) e->start -
431 			((e->pos - e->start) & 7);
432 		size_t pad = ALIGN(sz, 8) - sz;
433 		int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
434 			TO_ACCEPT2_FLAG(YYTD_DATA32);
435 		if (aa_g_paranoid_load)
436 			flags |= DFA_FLAG_VERIFY_STATES;
437 		dfa = aa_dfa_unpack(blob + pad, size - pad, flags);
438 
439 		if (IS_ERR(dfa))
440 			return dfa;
441 
442 	}
443 
444 	return dfa;
445 }
446 
447 /**
448  * unpack_trans_table - unpack a profile transition table
449  * @e: serialized data extent information  (NOT NULL)
450  * @profile: profile to add the accept table to (NOT NULL)
451  *
452  * Returns: true if table successfully unpacked
453  */
454 static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
455 {
456 	void *saved_pos = e->pos;
457 
458 	/* exec table is optional */
459 	if (aa_unpack_nameX(e, AA_STRUCT, "xtable")) {
460 		int i, size;
461 
462 		size = aa_unpack_array(e, NULL);
463 		/* currently 4 exec bits and entries 0-3 are reserved iupcx */
464 		if (size > 16 - 4)
465 			goto fail;
466 		profile->file.trans.table = kcalloc(size, sizeof(char *),
467 						    GFP_KERNEL);
468 		if (!profile->file.trans.table)
469 			goto fail;
470 
471 		profile->file.trans.size = size;
472 		for (i = 0; i < size; i++) {
473 			char *str;
474 			int c, j, pos, size2 = aa_unpack_strdup(e, &str, NULL);
475 			/* aa_unpack_strdup verifies that the last character is
476 			 * the null termination byte.
477 			 */
478 			if (!size2)
479 				goto fail;
480 			profile->file.trans.table[i] = str;
481 			/* verify that name doesn't start with space */
482 			if (isspace(*str))
483 				goto fail;
484 
485 			/* count the # of internal \0 */
486 			for (c = j = 0; j < size2 - 1; j++) {
487 				if (!str[j]) {
488 					pos = j;
489 					c++;
490 				}
491 			}
492 			if (*str == ':') {
493 				/* first character after : must be valid */
494 				if (!str[1])
495 					goto fail;
496 			/* beginning with : requires an embedded \0;
497 			 * verify that exactly 1 internal \0 exists
498 			 * (the trailing \0 was already verified by
499 			 * aa_unpack_strdup), then convert the \0
500 			 * back to : for label_parse
501 			 */
502 				if (c == 1)
503 					str[pos] = ':';
504 				else if (c > 1)
505 					goto fail;
506 			} else if (c)
507 				/* fail - all other cases with embedded \0 */
508 				goto fail;
509 		}
510 		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
511 			goto fail;
512 		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
513 			goto fail;
514 	}
515 	return true;
516 
517 fail:
518 	aa_free_domain_entries(&profile->file.trans);
519 	e->pos = saved_pos;
520 	return false;
521 }
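/*
 * Illustrative xtable entries as handled by unpack_trans_table() above
 * (hypothetical names):
 *
 *   "/bin/foo\0"      plain transition target, no embedded \0 permitted
 *   ":ns\0profile\0"  label target; the single embedded \0 is converted
 *                     back to ':', yielding ":ns:profile" for label_parse
 */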
522 
523 static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
524 {
525 	void *pos = e->pos;
526 
527 	if (aa_unpack_nameX(e, AA_STRUCT, "xattrs")) {
528 		int i, size;
529 
530 		size = aa_unpack_array(e, NULL);
531 		profile->xattr_count = size;
532 		profile->xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
533 		if (!profile->xattrs)
534 			goto fail;
535 		for (i = 0; i < size; i++) {
536 			if (!aa_unpack_strdup(e, &profile->xattrs[i], NULL))
537 				goto fail;
538 		}
539 		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
540 			goto fail;
541 		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
542 			goto fail;
543 	}
544 
545 	return true;
546 
547 fail:
548 	e->pos = pos;
549 	return false;
550 }
551 
552 static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
553 {
554 	void *pos = e->pos;
555 	int i, size;
556 
557 	if (aa_unpack_nameX(e, AA_STRUCT, "secmark")) {
558 		size = aa_unpack_array(e, NULL);
559 
560 		profile->secmark = kcalloc(size, sizeof(struct aa_secmark),
561 					   GFP_KERNEL);
562 		if (!profile->secmark)
563 			goto fail;
564 
565 		profile->secmark_count = size;
566 
567 		for (i = 0; i < size; i++) {
568 			if (!unpack_u8(e, &profile->secmark[i].audit, NULL))
569 				goto fail;
570 			if (!unpack_u8(e, &profile->secmark[i].deny, NULL))
571 				goto fail;
572 			if (!aa_unpack_strdup(e, &profile->secmark[i].label, NULL))
573 				goto fail;
574 		}
575 		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
576 			goto fail;
577 		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
578 			goto fail;
579 	}
580 
581 	return true;
582 
583 fail:
584 	if (profile->secmark) {
585 		for (i = 0; i < size; i++)
586 			kfree(profile->secmark[i].label);
587 		kfree(profile->secmark);
588 		profile->secmark_count = 0;
589 		profile->secmark = NULL;
590 	}
591 
592 	e->pos = pos;
593 	return false;
594 }
595 
596 static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
597 {
598 	void *pos = e->pos;
599 
600 	/* rlimits are optional */
601 	if (aa_unpack_nameX(e, AA_STRUCT, "rlimits")) {
602 		int i, size;
603 		u32 tmp = 0;
604 		if (!aa_unpack_u32(e, &tmp, NULL))
605 			goto fail;
606 		profile->rlimits.mask = tmp;
607 
608 		size = aa_unpack_array(e, NULL);
609 		if (size > RLIM_NLIMITS)
610 			goto fail;
611 		for (i = 0; i < size; i++) {
612 			u64 tmp2 = 0;
613 			int a = aa_map_resource(i);
614 			if (!aa_unpack_u64(e, &tmp2, NULL))
615 				goto fail;
616 			profile->rlimits.limits[a].rlim_max = tmp2;
617 		}
618 		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
619 			goto fail;
620 		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
621 			goto fail;
622 	}
623 	return true;
624 
625 fail:
626 	e->pos = pos;
627 	return false;
628 }
629 
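/*
 * Hash and compare callbacks for profile->data, the rhashtable built in
 * unpack_profile() to hold the key/value pairs of the optional "data"
 * struct.  Entries are keyed by their NUL-terminated key string.
 */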
630 static u32 strhash(const void *data, u32 len, u32 seed)
631 {
632 	const char * const *key = data;
633 
634 	return jhash(*key, strlen(*key), seed);
635 }
636 
637 static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
638 {
639 	const struct aa_data *data = obj;
640 	const char * const *key = arg->key;
641 
642 	return strcmp(data->key, *key);
643 }
644 
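/*
 * Rough order of the elements consumed by unpack_profile() below: the
 * profile name, optional "rename"/"attach" strings, an optional xmatch
 * dfa plus its length, an optional "disconnected" string, the required
 * "flags" struct, optional "path_flags", the capability sets (with the
 * optional "caps64"/"capsx" extensions), then the optional "xattrs",
 * "rlimits", "secmark", "policydb", file dfa, "xtable" and "data"
 * sections.
 */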
645 /**
646  * unpack_profile - unpack a serialized profile
647  * @e: serialized data extent information (NOT NULL)
648  * @ns_name: Returns - newly allocated copy of the ns name, else %NULL if the profile is not namespaced
649  *
650  * NOTE: unpack profile sets audit struct if there is a failure
651  */
652 static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
653 {
654 	struct aa_profile *profile = NULL;
655 	const char *tmpname, *tmpns = NULL, *name = NULL;
656 	const char *info = "failed to unpack profile";
657 	size_t ns_len;
658 	struct rhashtable_params params = { 0 };
659 	char *key = NULL;
660 	struct aa_data *data;
661 	int i, error = -EPROTO;
662 	kernel_cap_t tmpcap;
663 	u32 tmp;
664 
665 	*ns_name = NULL;
666 
667 	/* check that we have the right struct being passed */
668 	if (!aa_unpack_nameX(e, AA_STRUCT, "profile"))
669 		goto fail;
670 	if (!aa_unpack_str(e, &name, NULL))
671 		goto fail;
672 	if (*name == '\0')
673 		goto fail;
674 
675 	tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
676 	if (tmpns) {
677 		*ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
678 		if (!*ns_name) {
679 			info = "out of memory";
680 			goto fail;
681 		}
682 		name = tmpname;
683 	}
684 
685 	profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
686 	if (!profile)
687 		return ERR_PTR(-ENOMEM);
688 
689 	/* profile renaming is optional */
690 	(void) aa_unpack_str(e, &profile->rename, "rename");
691 
692 	/* attachment string is optional */
693 	(void) aa_unpack_str(e, &profile->attach, "attach");
694 
695 	/* xmatch is optional and may be NULL */
696 	profile->xmatch = unpack_dfa(e);
697 	if (IS_ERR(profile->xmatch)) {
698 		error = PTR_ERR(profile->xmatch);
699 		profile->xmatch = NULL;
700 		info = "bad xmatch";
701 		goto fail;
702 	}
703 	/* xmatch_len is not optional if xmatch is set */
704 	if (profile->xmatch) {
705 		if (!aa_unpack_u32(e, &tmp, NULL)) {
706 			info = "missing xmatch len";
707 			goto fail;
708 		}
709 		profile->xmatch_len = tmp;
710 	}
711 
712 	/* disconnected attachment string is optional */
713 	(void) aa_unpack_str(e, &profile->disconnected, "disconnected");
714 
715 	/* per profile debug flags (complain, audit) */
716 	if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) {
717 		info = "profile missing flags";
718 		goto fail;
719 	}
720 	info = "failed to unpack profile flags";
721 	if (!aa_unpack_u32(e, &tmp, NULL))
722 		goto fail;
723 	if (tmp & PACKED_FLAG_HAT)
724 		profile->label.flags |= FLAG_HAT;
725 	if (tmp & PACKED_FLAG_DEBUG1)
726 		profile->label.flags |= FLAG_DEBUG1;
727 	if (tmp & PACKED_FLAG_DEBUG2)
728 		profile->label.flags |= FLAG_DEBUG2;
729 	if (!aa_unpack_u32(e, &tmp, NULL))
730 		goto fail;
731 	if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) {
732 		profile->mode = APPARMOR_COMPLAIN;
733 	} else if (tmp == PACKED_MODE_ENFORCE) {
734 		profile->mode = APPARMOR_ENFORCE;
735 	} else if (tmp == PACKED_MODE_KILL) {
736 		profile->mode = APPARMOR_KILL;
737 	} else if (tmp == PACKED_MODE_UNCONFINED) {
738 		profile->mode = APPARMOR_UNCONFINED;
739 		profile->label.flags |= FLAG_UNCONFINED;
740 	} else {
741 		goto fail;
742 	}
743 	if (!aa_unpack_u32(e, &tmp, NULL))
744 		goto fail;
745 	if (tmp)
746 		profile->audit = AUDIT_ALL;
747 
748 	if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
749 		goto fail;
750 
751 	/* path_flags is optional */
752 	if (aa_unpack_u32(e, &profile->path_flags, "path_flags"))
753 		profile->path_flags |= profile->label.flags &
754 			PATH_MEDIATE_DELETED;
755 	else
756 		/* set a default value if path_flags field is not present */
757 		profile->path_flags = PATH_MEDIATE_DELETED;
758 
759 	info = "failed to unpack profile capabilities";
760 	if (!aa_unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
761 		goto fail;
762 	if (!aa_unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
763 		goto fail;
764 	if (!aa_unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
765 		goto fail;
766 	if (!aa_unpack_u32(e, &tmpcap.cap[0], NULL))
767 		goto fail;
768 
769 	info = "failed to unpack upper profile capabilities";
770 	if (aa_unpack_nameX(e, AA_STRUCT, "caps64")) {
771 		/* optional upper half of 64 bit caps */
772 		if (!aa_unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
773 			goto fail;
774 		if (!aa_unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
775 			goto fail;
776 		if (!aa_unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
777 			goto fail;
778 		if (!aa_unpack_u32(e, &(tmpcap.cap[1]), NULL))
779 			goto fail;
780 		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
781 			goto fail;
782 	}
783 
784 	info = "failed to unpack extended profile capabilities";
785 	if (aa_unpack_nameX(e, AA_STRUCT, "capsx")) {
786 		/* optional extended caps mediation mask */
787 		if (!aa_unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
788 			goto fail;
789 		if (!aa_unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
790 			goto fail;
791 		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
792 			goto fail;
793 	}
794 
795 	if (!unpack_xattrs(e, profile)) {
796 		info = "failed to unpack profile xattrs";
797 		goto fail;
798 	}
799 
800 	if (!unpack_rlimits(e, profile)) {
801 		info = "failed to unpack profile rlimits";
802 		goto fail;
803 	}
804 
805 	if (!unpack_secmark(e, profile)) {
806 		info = "failed to unpack profile secmark rules";
807 		goto fail;
808 	}
809 
810 	if (aa_unpack_nameX(e, AA_STRUCT, "policydb")) {
811 		/* generic policy dfa - optional and may be NULL */
812 		info = "failed to unpack policydb";
813 		profile->policy.dfa = unpack_dfa(e);
814 		if (IS_ERR(profile->policy.dfa)) {
815 			error = PTR_ERR(profile->policy.dfa);
816 			profile->policy.dfa = NULL;
817 			goto fail;
818 		} else if (!profile->policy.dfa) {
819 			error = -EPROTO;
820 			goto fail;
821 		}
822 		if (!aa_unpack_u32(e, &profile->policy.start[0], "start"))
823 			/* default start state */
824 			profile->policy.start[0] = DFA_START;
825 		/* setup class index */
826 		for (i = AA_CLASS_FILE; i <= AA_CLASS_LAST; i++) {
827 			profile->policy.start[i] =
828 				aa_dfa_next(profile->policy.dfa,
829 					    profile->policy.start[0],
830 					    i);
831 		}
832 		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
833 			goto fail;
834 	} else
835 		profile->policy.dfa = aa_get_dfa(nulldfa);
836 
837 	/* get file rules */
838 	profile->file.dfa = unpack_dfa(e);
839 	if (IS_ERR(profile->file.dfa)) {
840 		error = PTR_ERR(profile->file.dfa);
841 		profile->file.dfa = NULL;
842 		info = "failed to unpack profile file rules";
843 		goto fail;
844 	} else if (profile->file.dfa) {
845 		if (!aa_unpack_u32(e, &profile->file.start, "dfa_start"))
846 			/* default start state */
847 			profile->file.start = DFA_START;
848 	} else if (profile->policy.dfa &&
849 		   profile->policy.start[AA_CLASS_FILE]) {
850 		profile->file.dfa = aa_get_dfa(profile->policy.dfa);
851 		profile->file.start = profile->policy.start[AA_CLASS_FILE];
852 	} else
853 		profile->file.dfa = aa_get_dfa(nulldfa);
854 
855 	if (!unpack_trans_table(e, profile)) {
856 		info = "failed to unpack profile transition table";
857 		goto fail;
858 	}
859 
860 	if (aa_unpack_nameX(e, AA_STRUCT, "data")) {
861 		info = "out of memory";
862 		profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
863 		if (!profile->data)
864 			goto fail;
865 
866 		params.nelem_hint = 3;
867 		params.key_len = sizeof(void *);
868 		params.key_offset = offsetof(struct aa_data, key);
869 		params.head_offset = offsetof(struct aa_data, head);
870 		params.hashfn = strhash;
871 		params.obj_cmpfn = datacmp;
872 
873 		if (rhashtable_init(profile->data, &params)) {
874 			info = "failed to init key, value hash table";
875 			goto fail;
876 		}
877 
878 		while (aa_unpack_strdup(e, &key, NULL)) {
879 			data = kzalloc(sizeof(*data), GFP_KERNEL);
880 			if (!data) {
881 				kfree_sensitive(key);
882 				goto fail;
883 			}
884 
885 			data->key = key;
886 			data->size = aa_unpack_blob(e, &data->data, NULL);
887 			data->data = kvmemdup(data->data, data->size);
888 			if (data->size && !data->data) {
889 				kfree_sensitive(data->key);
890 				kfree_sensitive(data);
891 				goto fail;
892 			}
893 
894 			rhashtable_insert_fast(profile->data, &data->head,
895 					       profile->data->p);
896 		}
897 
898 		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
899 			info = "failed to unpack end of key, value data table";
900 			goto fail;
901 		}
902 	}
903 
904 	if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
905 		info = "failed to unpack end of profile";
906 		goto fail;
907 	}
908 
909 	return profile;
910 
911 fail:
912 	if (profile)
913 		name = NULL;
914 	else if (!name)
915 		name = "unknown";
916 	audit_iface(profile, NULL, name, info, e, error);
917 	aa_free_profile(profile);
918 
919 	return ERR_PTR(error);
920 }
921 
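/*
 * A header is a named u32 "version" optionally followed by a named string
 * "namespace".  aa_unpack() only requires the header for the first profile
 * in a stream (the e.pos == e.start check there).
 */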
922 /**
923  * verify_header - unpack serialized stream header
924  * @e: serialized data read head (NOT NULL)
925  * @required: whether the header is required or optional
926  * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
927  *
928  * Returns: error or 0 if header is good
929  */
930 static int verify_header(struct aa_ext *e, int required, const char **ns)
931 {
932 	int error = -EPROTONOSUPPORT;
933 	const char *name = NULL;
934 	*ns = NULL;
935 
936 	/* get the interface version */
937 	if (!aa_unpack_u32(e, &e->version, "version")) {
938 		if (required) {
939 			audit_iface(NULL, NULL, NULL, "invalid profile format",
940 				    e, error);
941 			return error;
942 		}
943 	}
944 
945 	/* Check that the interface version is currently supported.
946 	 * If not specified, use the previous version.
947 	 * Mask off everything that is not the kernel abi version.
948 	 */
949 	if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) {
950 		audit_iface(NULL, NULL, NULL, "unsupported interface version",
951 			    e, error);
952 		return error;
953 	}
954 
955 	/* read the namespace if present */
956 	if (aa_unpack_str(e, &name, "namespace")) {
957 		if (*name == '\0') {
958 			audit_iface(NULL, NULL, NULL, "invalid namespace name",
959 				    e, error);
960 			return error;
961 		}
962 		if (*ns && strcmp(*ns, name)) {
963 			audit_iface(NULL, NULL, NULL, "invalid ns change", e,
964 				    error);
965 		} else if (!*ns) {
966 			*ns = kstrdup(name, GFP_KERNEL);
967 			if (!*ns)
968 				return -ENOMEM;
969 		}
970 	}
971 
972 	return 0;
973 }
974 
975 static bool verify_xindex(int xindex, int table_size)
976 {
977 	int index, xtype;
978 	xtype = xindex & AA_X_TYPE_MASK;
979 	index = xindex & AA_X_INDEX_MASK;
980 	if (xtype == AA_X_TABLE && index >= table_size)
981 		return false;
982 	return true;
983 }
984 
985 /* verify dfa xindexes are in range of transition tables */
986 static bool verify_dfa_xindex(struct aa_dfa *dfa, int table_size)
987 {
988 	int i;
989 	for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
990 		if (!verify_xindex(dfa_user_xindex(dfa, i), table_size))
991 			return false;
992 		if (!verify_xindex(dfa_other_xindex(dfa, i), table_size))
993 			return false;
994 	}
995 	return true;
996 }
997 
998 /**
999  * verify_profile - Do post unpack analysis to verify profile consistency
1000  * @profile: profile to verify (NOT NULL)
1001  *
1002  * Returns: 0 if passes verification else error
1003  */
1004 static int verify_profile(struct aa_profile *profile)
1005 {
1006 	if (profile->file.dfa &&
1007 	    !verify_dfa_xindex(profile->file.dfa,
1008 			       profile->file.trans.size)) {
1009 		audit_iface(profile, NULL, NULL, "Invalid named transition",
1010 			    NULL, -EPROTO);
1011 		return -EPROTO;
1012 	}
1013 
1014 	return 0;
1015 }
1016 
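/*
 * aa_load_ent_free - drop the profile references and ns_name held by a
 * load entry, then free it.  A NULL entry is a no-op.
 */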
1017 void aa_load_ent_free(struct aa_load_ent *ent)
1018 {
1019 	if (ent) {
1020 		aa_put_profile(ent->rename);
1021 		aa_put_profile(ent->old);
1022 		aa_put_profile(ent->new);
1023 		kfree(ent->ns_name);
1024 		kfree_sensitive(ent);
1025 	}
1026 }
1027 
1028 struct aa_load_ent *aa_load_ent_alloc(void)
1029 {
1030 	struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
1031 	if (ent)
1032 		INIT_LIST_HEAD(&ent->list);
1033 	return ent;
1034 }
1035 
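/*
 * deflate_compress - zlib compress @src into a freshly allocated buffer
 *
 * On success *dst points to a new buffer (krealloc'd or copied from the
 * staging buffer) and *dlen is the compressed length.  When
 * CONFIG_SECURITY_APPARMOR_EXPORT_BINARY is not set no compression is
 * done: *dlen is set to @slen and *dst is left untouched, which
 * compress_loaddata() below relies on to keep using the original buffer.
 */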
1036 static int deflate_compress(const char *src, size_t slen, char **dst,
1037 			    size_t *dlen)
1038 {
1039 #ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
1040 	int error;
1041 	struct z_stream_s strm;
1042 	void *stgbuf, *dstbuf;
1043 	size_t stglen = deflateBound(slen);
1044 
1045 	memset(&strm, 0, sizeof(strm));
1046 
1047 	if (stglen < slen)
1048 		return -EFBIG;
1049 
1050 	strm.workspace = kvzalloc(zlib_deflate_workspacesize(MAX_WBITS,
1051 							     MAX_MEM_LEVEL),
1052 				  GFP_KERNEL);
1053 	if (!strm.workspace)
1054 		return -ENOMEM;
1055 
1056 	error = zlib_deflateInit(&strm, aa_g_rawdata_compression_level);
1057 	if (error != Z_OK) {
1058 		error = -ENOMEM;
1059 		goto fail_deflate_init;
1060 	}
1061 
1062 	stgbuf = kvzalloc(stglen, GFP_KERNEL);
1063 	if (!stgbuf) {
1064 		error = -ENOMEM;
1065 		goto fail_stg_alloc;
1066 	}
1067 
1068 	strm.next_in = src;
1069 	strm.avail_in = slen;
1070 	strm.next_out = stgbuf;
1071 	strm.avail_out = stglen;
1072 
1073 	error = zlib_deflate(&strm, Z_FINISH);
1074 	if (error != Z_STREAM_END) {
1075 		error = -EINVAL;
1076 		goto fail_deflate;
1077 	}
1078 	error = 0;
1079 
1080 	if (is_vmalloc_addr(stgbuf)) {
1081 		dstbuf = kvzalloc(strm.total_out, GFP_KERNEL);
1082 		if (dstbuf) {
1083 			memcpy(dstbuf, stgbuf, strm.total_out);
1084 			kvfree(stgbuf);
1085 		}
1086 	} else
1087 		/*
1088 		 * If the staging buffer was kmalloc'd, then using krealloc is
1089 		 * probably going to be faster. The destination buffer will
1090 		 * always be smaller, so it's just shrunk, avoiding a memcpy
1091 		 */
1092 		dstbuf = krealloc(stgbuf, strm.total_out, GFP_KERNEL);
1093 
1094 	if (!dstbuf) {
1095 		error = -ENOMEM;
1096 		goto fail_deflate;
1097 	}
1098 
1099 	*dst = dstbuf;
1100 	*dlen = strm.total_out;
1101 
1102 fail_stg_alloc:
1103 	zlib_deflateEnd(&strm);
1104 fail_deflate_init:
1105 	kvfree(strm.workspace);
1106 	return error;
1107 
1108 fail_deflate:
1109 	kvfree(stgbuf);
1110 	goto fail_stg_alloc;
1111 #else
1112 	*dlen = slen;
1113 	return 0;
1114 #endif
1115 }
1116 
1117 static int compress_loaddata(struct aa_loaddata *data)
1118 {
1119 
1120 	AA_BUG(data->compressed_size > 0);
1121 
1122 	/*
1123 	 * Shortcut the no compression case, else we increase the amount of
1124 	 * storage required by a small amount
1125 	 */
1126 	if (aa_g_rawdata_compression_level != 0) {
1127 		void *udata = data->data;
1128 		int error = deflate_compress(udata, data->size, &data->data,
1129 					     &data->compressed_size);
1130 		if (error)
1131 			return error;
1132 
1133 		if (udata != data->data)
1134 			kvfree(udata);
1135 	} else
1136 		data->compressed_size = data->size;
1137 
1138 	return 0;
1139 }
1140 
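/*
 * Minimal caller sketch, assuming the load path in policy.c
 * (cf. aa_replace_profiles()); handle() is a stand-in, and each ent->new
 * is a refcounted profile:
 *
 *	LIST_HEAD(lh);
 *	error = aa_unpack(udata, &lh, &ns_name);
 *	if (!error)
 *		list_for_each_entry(ent, &lh, list)
 *			handle(ent->new);
 */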
1141 /**
1142  * aa_unpack - unpack packed binary profile(s) data loaded from user space
1143  * @udata: user data copied to kmem  (NOT NULL)
1144  * @lh: list to place unpacked profiles in an aa_repl_ws
1145  * @ns: Returns - namespace the profile(s) are in if specified, else NULL (NOT NULL)
1146  *
1147  * Unpack user data and return refcounted allocated profile(s) stored in
1148  * @lh in order of discovery, with the list chain stored in base.list.
1149  *
1150  * Returns: 0 with the unpacked profile(s) on @lh, else an error code if
1151  * unpacking fails
1152  */
1153 int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
1154 	      const char **ns)
1155 {
1156 	struct aa_load_ent *tmp, *ent;
1157 	struct aa_profile *profile = NULL;
1158 	int error;
1159 	struct aa_ext e = {
1160 		.start = udata->data,
1161 		.end = udata->data + udata->size,
1162 		.pos = udata->data,
1163 	};
1164 
1165 	*ns = NULL;
1166 	while (e.pos < e.end) {
1167 		char *ns_name = NULL;
1168 		void *start;
1169 		error = verify_header(&e, e.pos == e.start, ns);
1170 		if (error)
1171 			goto fail;
1172 
1173 		start = e.pos;
1174 		profile = unpack_profile(&e, &ns_name);
1175 		if (IS_ERR(profile)) {
1176 			error = PTR_ERR(profile);
1177 			goto fail;
1178 		}
1179 
1180 		error = verify_profile(profile);
1181 		if (error)
1182 			goto fail_profile;
1183 
1184 		if (aa_g_hash_policy)
1185 			error = aa_calc_profile_hash(profile, e.version, start,
1186 						     e.pos - start);
1187 		if (error)
1188 			goto fail_profile;
1189 
1190 		ent = aa_load_ent_alloc();
1191 		if (!ent) {
1192 			error = -ENOMEM;
1193 			goto fail_profile;
1194 		}
1195 
1196 		ent->new = profile;
1197 		ent->ns_name = ns_name;
1198 		list_add_tail(&ent->list, lh);
1199 	}
1200 	udata->abi = e.version & K_ABI_MASK;
1201 	if (aa_g_hash_policy) {
1202 		udata->hash = aa_calc_hash(udata->data, udata->size);
1203 		if (IS_ERR(udata->hash)) {
1204 			error = PTR_ERR(udata->hash);
1205 			udata->hash = NULL;
1206 			goto fail;
1207 		}
1208 	}
1209 
1210 	if (aa_g_export_binary) {
1211 		error = compress_loaddata(udata);
1212 		if (error)
1213 			goto fail;
1214 	}
1215 	return 0;
1216 
1217 fail_profile:
1218 	aa_put_profile(profile);
1219 
1220 fail:
1221 	list_for_each_entry_safe(ent, tmp, lh, list) {
1222 		list_del_init(&ent->list);
1223 		aa_load_ent_free(ent);
1224 	}
1225 
1226 	return error;
1227 }
1228