xref: /linux/security/apparmor/policy_unpack.c (revision ae6d35ed0a481824a8730c39d5b319c8a76ea00e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * AppArmor security module
4  *
5  * This file contains AppArmor functions for unpacking policy loaded from
6  * userspace.
7  *
8  * Copyright (C) 1998-2008 Novell/SUSE
9  * Copyright 2009-2010 Canonical Ltd.
10  *
11  * AppArmor uses a serialized binary format for loading policy. To find
12  * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
13  * All policy is validated before it is used.
14  */
15 
16 #include <asm/unaligned.h>
17 #include <linux/ctype.h>
18 #include <linux/errno.h>
19 #include <linux/zstd.h>
20 
21 #include "include/apparmor.h"
22 #include "include/audit.h"
23 #include "include/cred.h"
24 #include "include/crypto.h"
25 #include "include/file.h"
26 #include "include/match.h"
27 #include "include/path.h"
28 #include "include/policy.h"
29 #include "include/policy_unpack.h"
30 
31 #define K_ABI_MASK 0x3ff
32 #define FORCE_COMPLAIN_FLAG 0x800
33 #define VERSION_LT(X, Y) (((X) & K_ABI_MASK) < ((Y) & K_ABI_MASK))
34 #define VERSION_LE(X, Y) (((X) & K_ABI_MASK) <= ((Y) & K_ABI_MASK))
35 #define VERSION_GT(X, Y) (((X) & K_ABI_MASK) > ((Y) & K_ABI_MASK))
36 
37 #define v5	5	/* base version */
38 #define v6	6	/* per entry policydb mediation check */
39 #define v7	7
40 #define v8	8	/* full network masking */
41 #define v9	9	/* xbits are used as permission bits in policydb */
42 
43 /*
44  * The AppArmor interface treats data as a type byte followed by the
45  * actual data.  The interface has the notion of a named entry
46  * which has a name (AA_NAME typecode followed by name string) followed by
47  * the entry's typecode and data.  Named types allow for optional
48  * elements and extensions to be added and tested for without breaking
49  * backwards compatibility.
50  */
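/*
 * For example, a named u32 element "version" with the value 9 is laid out
 * in the stream as:
 *
 *   AA_NAME, le16 8, "version\0", AA_U32, le32 9
 *
 * i.e. the AA_NAME typecode, a u16 length-prefixed NUL terminated tag,
 * then the element's own typecode and its little-endian data.  This is the
 * layout consumed by unpack_nameX() and unpack_u32() below.
 */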
51 
52 enum aa_code {
53 	AA_U8,
54 	AA_U16,
55 	AA_U32,
56 	AA_U64,
57 	AA_NAME,		/* same as string except it is items name */
58 	AA_STRING,
59 	AA_BLOB,
60 	AA_STRUCT,
61 	AA_STRUCTEND,
62 	AA_LIST,
63 	AA_LISTEND,
64 	AA_ARRAY,
65 	AA_ARRAYEND,
66 };
67 
68 /*
69  * aa_ext tracks the read head into the buffer containing the serialized
70  * profile.  The data is copied into a kernel buffer in apparmorfs and then
71  * handed off to the unpack routines.
72  */
73 struct aa_ext {
74 	void *start;
75 	void *end;
76 	void *pos;		/* pointer to current position in the buffer */
77 	u32 version;
78 };
79 
80 /* audit callback for unpack fields */
81 static void audit_cb(struct audit_buffer *ab, void *va)
82 {
83 	struct common_audit_data *sa = va;
84 
85 	if (aad(sa)->iface.ns) {
86 		audit_log_format(ab, " ns=");
87 		audit_log_untrustedstring(ab, aad(sa)->iface.ns);
88 	}
89 	if (aad(sa)->name) {
90 		audit_log_format(ab, " name=");
91 		audit_log_untrustedstring(ab, aad(sa)->name);
92 	}
93 	if (aad(sa)->iface.pos)
94 		audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
95 }
96 
97 /**
98  * audit_iface - do audit message for policy unpacking/load/replace/remove
99  * @new: profile if it has been allocated (MAY BE NULL)
100  * @ns_name: name of the ns the profile is to be loaded to (MAY BE NULL)
101  * @name: name of the profile being manipulated (MAY BE NULL)
102  * @info: any extra info about the failure (MAY BE NULL)
103  * @e: buffer position info
104  * @error: error code
105  *
106  * Returns: %0 or error
107  */
108 static int audit_iface(struct aa_profile *new, const char *ns_name,
109 		       const char *name, const char *info, struct aa_ext *e,
110 		       int error)
111 {
112 	struct aa_profile *profile = labels_profile(aa_current_raw_label());
113 	DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, NULL);
114 	if (e)
115 		aad(&sa)->iface.pos = e->pos - e->start;
116 	aad(&sa)->iface.ns = ns_name;
117 	if (new)
118 		aad(&sa)->name = new->base.hname;
119 	else
120 		aad(&sa)->name = name;
121 	aad(&sa)->info = info;
122 	aad(&sa)->error = error;
123 
124 	return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
125 }
126 
127 void __aa_loaddata_update(struct aa_loaddata *data, long revision)
128 {
129 	AA_BUG(!data);
130 	AA_BUG(!data->ns);
131 	AA_BUG(!mutex_is_locked(&data->ns->lock));
132 	AA_BUG(data->revision > revision);
133 
134 	data->revision = revision;
135 	if ((data->dents[AAFS_LOADDATA_REVISION])) {
136 		d_inode(data->dents[AAFS_LOADDATA_DIR])->i_mtime =
137 			current_time(d_inode(data->dents[AAFS_LOADDATA_DIR]));
138 		d_inode(data->dents[AAFS_LOADDATA_REVISION])->i_mtime =
139 			current_time(d_inode(data->dents[AAFS_LOADDATA_REVISION]));
140 	}
141 }
142 
143 bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
144 {
145 	if (l->size != r->size)
146 		return false;
147 	if (l->compressed_size != r->compressed_size)
148 		return false;
149 	if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
150 		return false;
151 	return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
152 }
153 
154 /*
155  * freeing requires the ns mutex lock, which is NOT safe to take in most
156  * places that put_loaddata is called from, so freeing is deferred to a workqueue
157  */
158 static void do_loaddata_free(struct work_struct *work)
159 {
160 	struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
161 	struct aa_ns *ns = aa_get_ns(d->ns);
162 
163 	if (ns) {
164 		mutex_lock_nested(&ns->lock, ns->level);
165 		__aa_fs_remove_rawdata(d);
166 		mutex_unlock(&ns->lock);
167 		aa_put_ns(ns);
168 	}
169 
170 	kfree_sensitive(d->hash);
171 	kfree_sensitive(d->name);
172 	kvfree(d->data);
173 	kfree_sensitive(d);
174 }
175 
176 void aa_loaddata_kref(struct kref *kref)
177 {
178 	struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);
179 
180 	if (d) {
181 		INIT_WORK(&d->work, do_loaddata_free);
182 		schedule_work(&d->work);
183 	}
184 }
185 
186 struct aa_loaddata *aa_loaddata_alloc(size_t size)
187 {
188 	struct aa_loaddata *d;
189 
190 	d = kzalloc(sizeof(*d), GFP_KERNEL);
191 	if (d == NULL)
192 		return ERR_PTR(-ENOMEM);
193 	d->data = kvzalloc(size, GFP_KERNEL);
194 	if (!d->data) {
195 		kfree(d);
196 		return ERR_PTR(-ENOMEM);
197 	}
198 	kref_init(&d->count);
199 	INIT_LIST_HEAD(&d->list);
200 
201 	return d;
202 }
203 
204 /* test if read will be in packed data bounds */
205 static bool inbounds(struct aa_ext *e, size_t size)
206 {
207 	return (size <= e->end - e->pos);
208 }
209 
210 static void *kvmemdup(const void *src, size_t len)
211 {
212 	void *p = kvmalloc(len, GFP_KERNEL);
213 
214 	if (p)
215 		memcpy(p, src, len);
216 	return p;
217 }
218 
219 /**
220  * unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
221  * @e: serialized data read head (NOT NULL)
222  * @chunk: start address for chunk of data (NOT NULL)
223  *
224  * Returns: the size of chunk found with the read head at the end of the chunk.
225  */
226 static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
227 {
228 	size_t size = 0;
229 	void *pos = e->pos;
230 
231 	if (!inbounds(e, sizeof(u16)))
232 		goto fail;
233 	size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
234 	e->pos += sizeof(__le16);
235 	if (!inbounds(e, size))
236 		goto fail;
237 	*chunk = e->pos;
238 	e->pos += size;
239 	return size;
240 
241 fail:
242 	e->pos = pos;
243 	return 0;
244 }
245 
246 /* unpack control byte */
247 static bool unpack_X(struct aa_ext *e, enum aa_code code)
248 {
249 	if (!inbounds(e, 1))
250 		return false;
251 	if (*(u8 *) e->pos != code)
252 		return false;
253 	e->pos++;
254 	return true;
255 }
256 
257 /**
258  * unpack_nameX - check if the next element is of type X with a name of @name
259  * @e: serialized data extent information  (NOT NULL)
260  * @code: type code
261  * @name: name to match to the serialized element.  (MAY BE NULL)
262  *
263  * check that the next serialized data element is of type X and has a tag
264  * name @name.  If @name is specified then there must be a matching
265  * name element in the stream.  If @name is NULL any name element will be
266  * skipped and only the typecode will be tested.
267  *
268  * Returns true on success (both type code and name tests match) and the read
269  * head is advanced past the headers
270  *
271  * Returns: false if either match fails; the read head does not move
272  */
273 static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
274 {
275 	/*
276 	 * May need to reset pos if name or type doesn't match
277 	 */
278 	void *pos = e->pos;
279 	/*
280 	 * Check for presence of a tagname, and if present name size
281 	 * AA_NAME tag value is a u16.
282 	 */
283 	if (unpack_X(e, AA_NAME)) {
284 		char *tag = NULL;
285 		size_t size = unpack_u16_chunk(e, &tag);
286 		/* if a name is specified it must match. otherwise skip tag */
287 		if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
288 			goto fail;
289 	} else if (name) {
290 		/* if a name is specified and there is no name tag fail */
291 		goto fail;
292 	}
293 
294 	/* now check if type code matches */
295 	if (unpack_X(e, code))
296 		return true;
297 
298 fail:
299 	e->pos = pos;
300 	return false;
301 }
302 
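/*
 * unpack_u8/unpack_u32/unpack_u64 - unpack an optionally named integer of
 * the given width.  On success the value is copied into @data (if @data is
 * not NULL) and the read head advances past it; on failure the read head
 * is reset and false is returned.
 */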
303 static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
304 {
305 	void *pos = e->pos;
306 
307 	if (unpack_nameX(e, AA_U8, name)) {
308 		if (!inbounds(e, sizeof(u8)))
309 			goto fail;
310 		if (data)
311 			*data = *((u8 *)e->pos);
312 		e->pos += sizeof(u8);
313 		return true;
314 	}
315 
316 fail:
317 	e->pos = pos;
318 	return false;
319 }
320 
321 static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
322 {
323 	void *pos = e->pos;
324 
325 	if (unpack_nameX(e, AA_U32, name)) {
326 		if (!inbounds(e, sizeof(u32)))
327 			goto fail;
328 		if (data)
329 			*data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
330 		e->pos += sizeof(u32);
331 		return true;
332 	}
333 
334 fail:
335 	e->pos = pos;
336 	return false;
337 }
338 
339 static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
340 {
341 	void *pos = e->pos;
342 
343 	if (unpack_nameX(e, AA_U64, name)) {
344 		if (!inbounds(e, sizeof(u64)))
345 			goto fail;
346 		if (data)
347 			*data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
348 		e->pos += sizeof(u64);
349 		return true;
350 	}
351 
352 fail:
353 	e->pos = pos;
354 	return false;
355 }
356 
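/*
 * unpack_array - unpack an optionally named array header and return the
 * number of elements; the elements themselves are unpacked by the caller.
 * Returns 0 and resets the read head on failure.
 */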
357 static size_t unpack_array(struct aa_ext *e, const char *name)
358 {
359 	void *pos = e->pos;
360 
361 	if (unpack_nameX(e, AA_ARRAY, name)) {
362 		int size;
363 		if (!inbounds(e, sizeof(u16)))
364 			goto fail;
365 		size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
366 		e->pos += sizeof(u16);
367 		return size;
368 	}
369 
370 fail:
371 	e->pos = pos;
372 	return 0;
373 }
374 
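/*
 * unpack_blob - unpack an optionally named u32 length-prefixed blob.  On
 * success @blob points into the raw stream (no copy is made) and the blob
 * size is returned; on failure the read head is reset and 0 is returned.
 */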
375 static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
376 {
377 	void *pos = e->pos;
378 
379 	if (unpack_nameX(e, AA_BLOB, name)) {
380 		u32 size;
381 		if (!inbounds(e, sizeof(u32)))
382 			goto fail;
383 		size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
384 		e->pos += sizeof(u32);
385 		if (inbounds(e, (size_t) size)) {
386 			*blob = e->pos;
387 			e->pos += size;
388 			return size;
389 		}
390 	}
391 
392 fail:
393 	e->pos = pos;
394 	return 0;
395 }
396 
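/*
 * unpack_str returns a pointer into the raw stream (@string is not copied)
 * and verifies NUL termination; unpack_strdup below does the same but
 * returns a kmemdup'd copy that the caller must free.  Both return the
 * string size including the trailing NUL, or 0 on failure.
 */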
397 static int unpack_str(struct aa_ext *e, const char **string, const char *name)
398 {
399 	char *src_str;
400 	size_t size = 0;
401 	void *pos = e->pos;
402 	*string = NULL;
403 	if (unpack_nameX(e, AA_STRING, name)) {
404 		size = unpack_u16_chunk(e, &src_str);
405 		if (size) {
406 			/* strings are null terminated, length is size - 1 */
407 			if (src_str[size - 1] != 0)
408 				goto fail;
409 			*string = src_str;
410 
411 			return size;
412 		}
413 	}
414 
415 fail:
416 	e->pos = pos;
417 	return 0;
418 }
419 
420 static int unpack_strdup(struct aa_ext *e, char **string, const char *name)
421 {
422 	const char *tmp;
423 	void *pos = e->pos;
424 	int res = unpack_str(e, &tmp, name);
425 	*string = NULL;
426 
427 	if (!res)
428 		return 0;
429 
430 	*string = kmemdup(tmp, res, GFP_KERNEL);
431 	if (!*string) {
432 		e->pos = pos;
433 		return 0;
434 	}
435 
436 	return res;
437 }
438 
439 
440 /**
441  * unpack_dfa - unpack a file rule dfa
442  * @e: serialized data extent information (NOT NULL)
443  *
444  * returns dfa or ERR_PTR or NULL if no dfa
445  */
446 static struct aa_dfa *unpack_dfa(struct aa_ext *e)
447 {
448 	char *blob = NULL;
449 	size_t size;
450 	struct aa_dfa *dfa = NULL;
451 
452 	size = unpack_blob(e, &blob, "aadfa");
453 	if (size) {
454 		/*
455 		 * The dfa is aligned within the blob to 8 bytes
456 		 * from the beginning of the stream.
457 		 * The alignment adjustment is needed by the dfa unpack code.
458 		 */
459 		size_t sz = blob - (char *) e->start -
460 			((e->pos - e->start) & 7);
461 		size_t pad = ALIGN(sz, 8) - sz;
462 		int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
463 			TO_ACCEPT2_FLAG(YYTD_DATA32);
464 		if (aa_g_paranoid_load)
465 			flags |= DFA_FLAG_VERIFY_STATES;
466 		dfa = aa_dfa_unpack(blob + pad, size - pad, flags);
467 
468 		if (IS_ERR(dfa))
469 			return dfa;
470 
471 	}
472 
473 	return dfa;
474 }
475 
476 /**
477  * unpack_trans_table - unpack a profile transition table
478  * @e: serialized data extent information  (NOT NULL)
479  * @profile: profile to add the accept table to (NOT NULL)
480  *
481  * Returns: true if table successfully unpacked
482  */
483 static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
484 {
485 	void *saved_pos = e->pos;
486 
487 	/* exec table is optional */
488 	if (unpack_nameX(e, AA_STRUCT, "xtable")) {
489 		int i, size;
490 
491 		size = unpack_array(e, NULL);
492 		/* table size is currently limited to 2^24 entries */
493 		if (size > (1 << 24))
494 			goto fail;
495 		profile->file.trans.table = kcalloc(size, sizeof(char *),
496 						    GFP_KERNEL);
497 		if (!profile->file.trans.table)
498 			goto fail;
499 
500 		profile->file.trans.size = size;
501 		for (i = 0; i < size; i++) {
502 			char *str;
503 			int c, j, pos, size2 = unpack_strdup(e, &str, NULL);
504 			/* unpack_strdup verifies that the last character is
505 			 * a null termination byte.
506 			 */
507 			if (!size2)
508 				goto fail;
509 			profile->file.trans.table[i] = str;
510 			/* verify that name doesn't start with space */
511 			if (isspace(*str))
512 				goto fail;
513 
514 			/* count # of internal \0 bytes */
515 			for (c = j = 0; j < size2 - 1; j++) {
516 				if (!str[j]) {
517 					pos = j;
518 					c++;
519 				}
520 			}
521 			if (*str == ':') {
522 				/* first character after : must be valid */
523 				if (!str[1])
524 					goto fail;
525 				/* beginning with : requires an embedded \0,
526 				 * verify that exactly 1 internal \0 exists
527 				 * trailing \0 already verified by unpack_strdup
528 				 *
529 				 * convert \0 back to : for label_parse
530 				 */
531 				if (c == 1)
532 					str[pos] = ':';
533 				else if (c > 1)
534 					goto fail;
535 			} else if (c)
536 				/* fail - all other cases with embedded \0 */
537 				goto fail;
538 		}
539 		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
540 			goto fail;
541 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
542 			goto fail;
543 	}
544 	return true;
545 
546 fail:
547 	aa_free_domain_entries(&profile->file.trans);
548 	e->pos = saved_pos;
549 	return false;
550 }
551 
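/* unpack the optional array of xattr name strings for this profile */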
552 static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
553 {
554 	void *pos = e->pos;
555 
556 	if (unpack_nameX(e, AA_STRUCT, "xattrs")) {
557 		int i, size;
558 
559 		size = unpack_array(e, NULL);
560 		profile->xattr_count = size;
561 		profile->xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
562 		if (!profile->xattrs)
563 			goto fail;
564 		for (i = 0; i < size; i++) {
565 			if (!unpack_strdup(e, &profile->xattrs[i], NULL))
566 				goto fail;
567 		}
568 		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
569 			goto fail;
570 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
571 			goto fail;
572 	}
573 
574 	return true;
575 
576 fail:
577 	e->pos = pos;
578 	return false;
579 }
580 
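/* unpack optional secmark rules: a per entry audit flag, deny flag and label */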
581 static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
582 {
583 	void *pos = e->pos;
584 	int i, size;
585 
586 	if (unpack_nameX(e, AA_STRUCT, "secmark")) {
587 		size = unpack_array(e, NULL);
588 
589 		profile->secmark = kcalloc(size, sizeof(struct aa_secmark),
590 					   GFP_KERNEL);
591 		if (!profile->secmark)
592 			goto fail;
593 
594 		profile->secmark_count = size;
595 
596 		for (i = 0; i < size; i++) {
597 			if (!unpack_u8(e, &profile->secmark[i].audit, NULL))
598 				goto fail;
599 			if (!unpack_u8(e, &profile->secmark[i].deny, NULL))
600 				goto fail;
601 			if (!unpack_strdup(e, &profile->secmark[i].label, NULL))
602 				goto fail;
603 		}
604 		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
605 			goto fail;
606 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
607 			goto fail;
608 	}
609 
610 	return true;
611 
612 fail:
613 	if (profile->secmark) {
614 		for (i = 0; i < size; i++)
615 			kfree(profile->secmark[i].label);
616 		kfree(profile->secmark);
617 		profile->secmark_count = 0;
618 		profile->secmark = NULL;
619 	}
620 
621 	e->pos = pos;
622 	return false;
623 }
624 
625 static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
626 {
627 	void *pos = e->pos;
628 
629 	/* rlimits are optional */
630 	if (unpack_nameX(e, AA_STRUCT, "rlimits")) {
631 		int i, size;
632 		u32 tmp = 0;
633 		if (!unpack_u32(e, &tmp, NULL))
634 			goto fail;
635 		profile->rlimits.mask = tmp;
636 
637 		size = unpack_array(e, NULL);
638 		if (size > RLIM_NLIMITS)
639 			goto fail;
640 		for (i = 0; i < size; i++) {
641 			u64 tmp2 = 0;
642 			int a = aa_map_resource(i);
643 			if (!unpack_u64(e, &tmp2, NULL))
644 				goto fail;
645 			profile->rlimits.limits[a].rlim_max = tmp2;
646 		}
647 		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
648 			goto fail;
649 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
650 			goto fail;
651 	}
652 	return true;
653 
654 fail:
655 	e->pos = pos;
656 	return false;
657 }
658 
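/* rhashtable hash/compare callbacks for the profile's key, value data table */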
659 static u32 strhash(const void *data, u32 len, u32 seed)
660 {
661 	const char * const *key = data;
662 
663 	return jhash(*key, strlen(*key), seed);
664 }
665 
666 static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
667 {
668 	const struct aa_data *data = obj;
669 	const char * const *key = arg->key;
670 
671 	return strcmp(data->key, *key);
672 }
673 
674 /* remap old accept table embedded permissions to separate permission table */
675 static u32 dfa_map_xindex(u16 mask)
676 {
677 	u16 old_index = (mask >> 10) & 0xf;
678 	u32 index = 0;
679 
680 	if (mask & 0x100)
681 		index |= AA_X_UNSAFE;
682 	if (mask & 0x200)
683 		index |= AA_X_INHERIT;
684 	if (mask & 0x80)
685 		index |= AA_X_UNCONFINED;
686 
687 	if (old_index == 1) {
688 		index |= AA_X_UNCONFINED;
689 	} else if (old_index == 2) {
690 		index |= AA_X_NAME;
691 	} else if (old_index == 3) {
692 		index |= AA_X_NAME | AA_X_CHILD;
693 	} else if (old_index) {
694 		index |= AA_X_TABLE;
695 		index |= old_index - 4;
696 	}
697 
698 	return index;
699 }
700 
701 /*
702  * map old dfa inline permissions to new format
703  */
704 #define dfa_user_allow(dfa, state) (((ACCEPT_TABLE(dfa)[state]) & 0x7f) | \
705 				    ((ACCEPT_TABLE(dfa)[state]) & 0x80000000))
706 #define dfa_user_xbits(dfa, state) (((ACCEPT_TABLE(dfa)[state]) >> 7) & 0x7f)
707 #define dfa_user_audit(dfa, state) ((ACCEPT_TABLE2(dfa)[state]) & 0x7f)
708 #define dfa_user_quiet(dfa, state) (((ACCEPT_TABLE2(dfa)[state]) >> 7) & 0x7f)
709 #define dfa_user_xindex(dfa, state) \
710 	(dfa_map_xindex(ACCEPT_TABLE(dfa)[state] & 0x3fff))
711 
712 #define dfa_other_allow(dfa, state) ((((ACCEPT_TABLE(dfa)[state]) >> 14) & \
713 				      0x7f) |				\
714 				     ((ACCEPT_TABLE(dfa)[state]) & 0x80000000))
715 #define dfa_other_xbits(dfa, state) \
716 	((((ACCEPT_TABLE(dfa)[state]) >> 7) >> 14) & 0x7f)
717 #define dfa_other_audit(dfa, state) (((ACCEPT_TABLE2(dfa)[state]) >> 14) & 0x7f)
718 #define dfa_other_quiet(dfa, state) \
719 	((((ACCEPT_TABLE2(dfa)[state]) >> 7) >> 14) & 0x7f)
720 #define dfa_other_xindex(dfa, state) \
721 	dfa_map_xindex((ACCEPT_TABLE(dfa)[state] >> 14) & 0x3fff)
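/*
 * Layout of an old-style accept entry (see the macros above): "user" perms
 * occupy bits 0-13 (allow 0-6, xbits 7-13) and "other" perms bits 14-27
 * (allow 14-20, xbits 21-27).  Bits 30/31 carry onexec/change_profile (see
 * compute_fperms_allow()).  ACCEPT_TABLE2 packs the audit masks in the low
 * 7 bits of each half and the quiet masks in the next 7 bits.
 */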
722 
723 /**
724  * map_old_perms - map old file perms layout to the new layout
725  * @old: permission set in old mapping
726  *
727  * Returns: new permission mapping
728  */
729 static u32 map_old_perms(u32 old)
730 {
731 	u32 new = old & 0xf;
732 
733 	if (old & MAY_READ)
734 		new |= AA_MAY_GETATTR | AA_MAY_OPEN;
735 	if (old & MAY_WRITE)
736 		new |= AA_MAY_SETATTR | AA_MAY_CREATE | AA_MAY_DELETE |
737 		       AA_MAY_CHMOD | AA_MAY_CHOWN | AA_MAY_OPEN;
738 	if (old & 0x10)
739 		new |= AA_MAY_LINK;
740 	/* the old mapping lock and link_subset flags were overlaid
741 	 * and their use was determined by which part of a pair they were in
742 	 */
743 	if (old & 0x20)
744 		new |= AA_MAY_LOCK | AA_LINK_SUBSET;
745 	if (old & 0x40)	/* AA_EXEC_MMAP */
746 		new |= AA_EXEC_MMAP;
747 
748 	return new;
749 }
750 
751 static void compute_fperms_allow(struct aa_perms *perms, struct aa_dfa *dfa,
752 				 aa_state_t state)
753 {
754 	perms->allow |= AA_MAY_GETATTR;
755 
756 	/* change_profile wasn't determined by ownership in old mapping */
757 	if (ACCEPT_TABLE(dfa)[state] & 0x80000000)
758 		perms->allow |= AA_MAY_CHANGE_PROFILE;
759 	if (ACCEPT_TABLE(dfa)[state] & 0x40000000)
760 		perms->allow |= AA_MAY_ONEXEC;
761 }
762 
763 static struct aa_perms compute_fperms_user(struct aa_dfa *dfa,
764 					   aa_state_t state)
765 {
766 	struct aa_perms perms = { };
767 
768 	perms.allow = map_old_perms(dfa_user_allow(dfa, state));
769 	perms.audit = map_old_perms(dfa_user_audit(dfa, state));
770 	perms.quiet = map_old_perms(dfa_user_quiet(dfa, state));
771 	perms.xindex = dfa_user_xindex(dfa, state);
772 
773 	compute_fperms_allow(&perms, dfa, state);
774 
775 	return perms;
776 }
777 
778 static struct aa_perms compute_fperms_other(struct aa_dfa *dfa,
779 					    aa_state_t state)
780 {
781 	struct aa_perms perms = { };
782 
783 	perms.allow = map_old_perms(dfa_other_allow(dfa, state));
784 	perms.audit = map_old_perms(dfa_other_audit(dfa, state));
785 	perms.quiet = map_old_perms(dfa_other_quiet(dfa, state));
786 	perms.xindex = dfa_other_xindex(dfa, state);
787 
788 	compute_fperms_allow(&perms, dfa, state);
789 
790 	return perms;
791 }
792 
793 /**
794  * compute_fperms - convert dfa compressed perms to internal perms and store
795  *		       them so they can be retrieved later.
796  * @dfa: a dfa using fperms to remap to internal permissions
797  *
798  * Returns: remapped perm table
799  */
800 static struct aa_perms *compute_fperms(struct aa_dfa *dfa)
801 {
802 	aa_state_t state;
803 	unsigned int state_count;
804 	struct aa_perms *table;
805 
806 	AA_BUG(!dfa);
807 
808 	state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
809 	/* DFAs are restricted from having a state_count of less than 2 */
810 	table = kvcalloc(state_count * 2, sizeof(struct aa_perms), GFP_KERNEL);
811 	if (!table)
812 		return NULL;
813 
814 	/* zero init so skip the trap state (state == 0) */
815 	for (state = 1; state < state_count; state++) {
816 		table[state * 2] = compute_fperms_user(dfa, state);
817 		table[state * 2 + 1] = compute_fperms_other(dfa, state);
818 	}
819 
820 	return table;
821 }
822 
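/*
 * compute_xmatch_perms - build a permission table for an xmatch dfa; only
 * the old "user" allow bits of each accept entry are carried over
 */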
823 static struct aa_perms *compute_xmatch_perms(struct aa_dfa *xmatch)
824 {
825 	struct aa_perms *perms;
826 	int state;
827 	int state_count;
828 
829 	AA_BUG(!xmatch);
830 
831 	state_count = xmatch->tables[YYTD_ID_BASE]->td_lolen;
832 	/* DFAs are restricted from having a state_count of less than 2 */
833 	perms = kvcalloc(state_count, sizeof(struct aa_perms), GFP_KERNEL);
	if (!perms)
		return NULL;
834 
835 	/* zero init so skip the trap state (state == 0) */
836 	for (state = 1; state < state_count; state++)
837 		perms[state].allow = dfa_user_allow(xmatch, state);
838 
839 	return perms;
840 }
841 
842 static u32 map_other(u32 x)
843 {
844 	return ((x & 0x3) << 8) |	/* SETATTR/GETATTR */
845 		((x & 0x1c) << 18) |	/* ACCEPT/BIND/LISTEN */
846 		((x & 0x60) << 19);	/* SETOPT/GETOPT */
847 }
848 
849 static u32 map_xbits(u32 x)
850 {
851 	return ((x & 0x1) << 7) |
852 		((x & 0x7e) << 9);
853 }
854 
855 static struct aa_perms compute_perms_entry(struct aa_dfa *dfa,
856 					   aa_state_t state,
857 					   u32 version)
858 {
859 	struct aa_perms perms = { };
860 
861 	perms.allow = dfa_user_allow(dfa, state);
862 	perms.audit = dfa_user_audit(dfa, state);
863 	perms.quiet = dfa_user_quiet(dfa, state);
864 
865 	/*
866 	 * This mapping is convoluted due to history.
867 	 * v1-v4: only file perms, which are handled by compute_fperms
868 	 * v5: added policydb which dropped user conditional to gain new
869 	 *     perm bits, but had to map around the xbits because the
870 	 *     userspace compiler was still munging them.
871 	 * v9: adds using the xbits in policydb because the compiler now
872 	 *     supports treating policydb permission bits differently.
873 	 *     Unfortunately there is no way to force auditing on the
874 	 *     perms represented by the xbits
875 	 */
876 	perms.allow |= map_other(dfa_other_allow(dfa, state));
877 	if (VERSION_LE(version, v8))
878 		perms.allow |= AA_MAY_LOCK;
879 	else
880 		perms.allow |= map_xbits(dfa_user_xbits(dfa, state));
881 
882 	/*
883 	 * for v5-v9 perm mapping in the policydb, the other set is used
884 	 * to extend the general perm set
885 	 */
886 	perms.audit |= map_other(dfa_other_audit(dfa, state));
887 	perms.quiet |= map_other(dfa_other_quiet(dfa, state));
888 	if (VERSION_GT(version, v8))
889 		perms.quiet |= map_xbits(dfa_other_xbits(dfa, state));
890 
891 	return perms;
892 }
893 
894 static struct aa_perms *compute_perms(struct aa_dfa *dfa, u32 version)
895 {
896 	unsigned int state;
897 	unsigned int state_count;
898 	struct aa_perms *table;
899 
900 	AA_BUG(!dfa);
901 
902 	state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
903 	/* DFAs are restricted from having a state_count of less than 2 */
904 	table = kvcalloc(state_count, sizeof(struct aa_perms), GFP_KERNEL);
905 	if (!table)
906 		return NULL;
907 
908 	/* zero init so skip the trap state (state == 0) */
909 	for (state = 1; state < state_count; state++)
910 		table[state] = compute_perms_entry(dfa, state, version);
911 
912 	return table;
913 }
914 
915 /**
916  * remap_dfa_accept - remap old dfa accept table to be an index
917  * @dfa: dfa to do the remapping on
918  * @factor: scaling factor for the index conversion.
919  *
920  * Used in conjunction with compute_Xperms, it converts old style perms
921  * that are encoded in the dfa accept tables to the new style where
922  * there is a permission table and the accept table is an index into
923  * the permission table.
924  */
925 static void remap_dfa_accept(struct aa_dfa *dfa, unsigned int factor)
926 {
927 	unsigned int state;
928 	unsigned int state_count;
929 
930 	AA_BUG(!dfa);
931 	state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
932 	for (state = 0; state < state_count; state++)
933 		ACCEPT_TABLE(dfa)[state] = state * factor;
934 	kvfree(dfa->tables[YYTD_ID_ACCEPT2]);
935 	dfa->tables[YYTD_ID_ACCEPT2] = NULL;
936 }
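/*
 * For example, with @factor == 2 state N's accept entry becomes 2*N, which
 * indexes the user/other aa_perms pair that compute_fperms() stored at
 * table[2*N] and table[2*N + 1]; the xmatch and policydb dfas use a factor
 * of 1.
 */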
937 
938 /**
939  * unpack_profile - unpack a serialized profile
940  * @e: serialized data extent information (NOT NULL)
941  * @ns_name: Returns - newly allocated copy of the ns name, or NULL if none/on error
942  *
943  * NOTE: unpack_profile() generates an audit record if unpacking fails
944  */
945 static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
946 {
947 	struct aa_profile *profile = NULL;
948 	const char *tmpname, *tmpns = NULL, *name = NULL;
949 	const char *info = "failed to unpack profile";
950 	size_t ns_len;
951 	struct rhashtable_params params = { 0 };
952 	char *key = NULL;
953 	struct aa_data *data;
954 	int i, error = -EPROTO;
955 	kernel_cap_t tmpcap;
956 	u32 tmp;
957 
958 	*ns_name = NULL;
959 
960 	/* check that we have the right struct being passed */
961 	if (!unpack_nameX(e, AA_STRUCT, "profile"))
962 		goto fail;
963 	if (!unpack_str(e, &name, NULL))
964 		goto fail;
965 	if (*name == '\0')
966 		goto fail;
967 
968 	tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
969 	if (tmpns) {
970 		*ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
971 		if (!*ns_name) {
972 			info = "out of memory";
973 			goto fail;
974 		}
975 		name = tmpname;
976 	}
977 
978 	profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
979 	if (!profile)
980 		return ERR_PTR(-ENOMEM);
981 
982 	/* profile renaming is optional */
983 	(void) unpack_str(e, &profile->rename, "rename");
984 
985 	/* attachment string is optional */
986 	(void) unpack_str(e, &profile->attach, "attach");
987 
988 	/* xmatch is optional and may be NULL */
989 	profile->xmatch.dfa = unpack_dfa(e);
990 	if (IS_ERR(profile->xmatch.dfa)) {
991 		error = PTR_ERR(profile->xmatch.dfa);
992 		profile->xmatch.dfa = NULL;
993 		info = "bad xmatch";
994 		goto fail;
995 	}
996 	/* neither xmatch_len nor xmatch_perms is optional if xmatch is set */
997 	if (profile->xmatch.dfa) {
998 		if (!unpack_u32(e, &tmp, NULL)) {
999 			info = "missing xmatch len";
1000 			goto fail;
1001 		}
1002 		profile->xmatch_len = tmp;
1003 		profile->xmatch.start[AA_CLASS_XMATCH] = DFA_START;
1004 		profile->xmatch.perms = compute_xmatch_perms(profile->xmatch.dfa);
1005 		if (!profile->xmatch.perms) {
1006 			info = "failed to convert xmatch permission table";
1007 			goto fail;
1008 		}
1009 		remap_dfa_accept(profile->xmatch.dfa, 1);
1010 	}
1011 
1012 	/* disconnected attachment string is optional */
1013 	(void) unpack_str(e, &profile->disconnected, "disconnected");
1014 
1015 	/* profile flags: hat/debug bits, mode and audit settings */
1016 	if (!unpack_nameX(e, AA_STRUCT, "flags")) {
1017 		info = "profile missing flags";
1018 		goto fail;
1019 	}
1020 	info = "failed to unpack profile flags";
1021 	if (!unpack_u32(e, &tmp, NULL))
1022 		goto fail;
1023 	if (tmp & PACKED_FLAG_HAT)
1024 		profile->label.flags |= FLAG_HAT;
1025 	if (tmp & PACKED_FLAG_DEBUG1)
1026 		profile->label.flags |= FLAG_DEBUG1;
1027 	if (tmp & PACKED_FLAG_DEBUG2)
1028 		profile->label.flags |= FLAG_DEBUG2;
1029 	if (!unpack_u32(e, &tmp, NULL))
1030 		goto fail;
1031 	if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) {
1032 		profile->mode = APPARMOR_COMPLAIN;
1033 	} else if (tmp == PACKED_MODE_ENFORCE) {
1034 		profile->mode = APPARMOR_ENFORCE;
1035 	} else if (tmp == PACKED_MODE_KILL) {
1036 		profile->mode = APPARMOR_KILL;
1037 	} else if (tmp == PACKED_MODE_UNCONFINED) {
1038 		profile->mode = APPARMOR_UNCONFINED;
1039 		profile->label.flags |= FLAG_UNCONFINED;
1040 	} else {
1041 		goto fail;
1042 	}
1043 	if (!unpack_u32(e, &tmp, NULL))
1044 		goto fail;
1045 	if (tmp)
1046 		profile->audit = AUDIT_ALL;
1047 
1048 	if (!unpack_nameX(e, AA_STRUCTEND, NULL))
1049 		goto fail;
1050 
1051 	/* path_flags is optional */
1052 	if (unpack_u32(e, &profile->path_flags, "path_flags"))
1053 		profile->path_flags |= profile->label.flags &
1054 			PATH_MEDIATE_DELETED;
1055 	else
1056 		/* set a default value if path_flags field is not present */
1057 		profile->path_flags = PATH_MEDIATE_DELETED;
1058 
1059 	info = "failed to unpack profile capabilities";
1060 	if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
1061 		goto fail;
1062 	if (!unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
1063 		goto fail;
1064 	if (!unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
1065 		goto fail;
1066 	if (!unpack_u32(e, &tmpcap.cap[0], NULL))
1067 		goto fail;
1068 
1069 	info = "failed to unpack upper profile capabilities";
1070 	if (unpack_nameX(e, AA_STRUCT, "caps64")) {
1071 		/* optional upper half of 64 bit caps */
1072 		if (!unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
1073 			goto fail;
1074 		if (!unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
1075 			goto fail;
1076 		if (!unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
1077 			goto fail;
1078 		if (!unpack_u32(e, &(tmpcap.cap[1]), NULL))
1079 			goto fail;
1080 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
1081 			goto fail;
1082 	}
1083 
1084 	info = "failed to unpack extended profile capabilities";
1085 	if (unpack_nameX(e, AA_STRUCT, "capsx")) {
1086 		/* optional extended caps mediation mask */
1087 		if (!unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
1088 			goto fail;
1089 		if (!unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
1090 			goto fail;
1091 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
1092 			goto fail;
1093 	}
1094 
1095 	if (!unpack_xattrs(e, profile)) {
1096 		info = "failed to unpack profile xattrs";
1097 		goto fail;
1098 	}
1099 
1100 	if (!unpack_rlimits(e, profile)) {
1101 		info = "failed to unpack profile rlimits";
1102 		goto fail;
1103 	}
1104 
1105 	if (!unpack_secmark(e, profile)) {
1106 		info = "failed to unpack profile secmark rules";
1107 		goto fail;
1108 	}
1109 
1110 	if (unpack_nameX(e, AA_STRUCT, "policydb")) {
1111 		/* generic policy dfa - optional and may be NULL */
1112 		info = "failed to unpack policydb";
1113 		profile->policy.dfa = unpack_dfa(e);
1114 		if (IS_ERR(profile->policy.dfa)) {
1115 			error = PTR_ERR(profile->policy.dfa);
1116 			profile->policy.dfa = NULL;
1117 			goto fail;
1118 		} else if (!profile->policy.dfa) {
1119 			error = -EPROTO;
1120 			goto fail;
1121 		}
1122 		if (!unpack_u32(e, &profile->policy.start[0], "start"))
1123 			/* default start state */
1124 			profile->policy.start[0] = DFA_START;
1125 		/* setup class index */
1126 		for (i = AA_CLASS_FILE; i <= AA_CLASS_LAST; i++) {
1127 			profile->policy.start[i] =
1128 				aa_dfa_next(profile->policy.dfa,
1129 					    profile->policy.start[0],
1130 					    i);
1131 		}
1132 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
1133 			goto fail;
1134 		profile->policy.perms = compute_perms(profile->policy.dfa,
1135 						      e->version);
1136 		if (!profile->policy.perms) {
1137 			info = "failed to remap policydb permission table";
1138 			goto fail;
1139 		}
1140 		/* Do not remap internal dfas */
1141 		remap_dfa_accept(profile->policy.dfa, 1);
1142 	} else
1143 		profile->policy.dfa = aa_get_dfa(nulldfa);
1144 
1145 	/* get file rules */
1146 	profile->file.dfa = unpack_dfa(e);
1147 	if (IS_ERR(profile->file.dfa)) {
1148 		error = PTR_ERR(profile->file.dfa);
1149 		profile->file.dfa = NULL;
1150 		info = "failed to unpack profile file rules";
1151 		goto fail;
1152 	} else if (profile->file.dfa) {
1153 		if (!unpack_u32(e, &profile->file.start[AA_CLASS_FILE],
1154 				"dfa_start"))
1155 			/* default start state */
1156 			profile->file.start[AA_CLASS_FILE] = DFA_START;
1157 		profile->file.perms = compute_fperms(profile->file.dfa);
1158 		if (!profile->file.perms) {
1159 			info = "failed to remap file permission table";
1160 			goto fail;
1161 		}
1162 		remap_dfa_accept(profile->file.dfa, 2);
1163 		if (!unpack_trans_table(e, profile)) {
1164 			info = "failed to unpack profile transition table";
1165 			goto fail;
1166 		}
1167 	} else if (profile->policy.dfa &&
1168 		   profile->policy.start[AA_CLASS_FILE]) {
1169 		profile->file.dfa = aa_get_dfa(profile->policy.dfa);
1170 		profile->file.start[AA_CLASS_FILE] = profile->policy.start[AA_CLASS_FILE];
1171 	} else
1172 		profile->file.dfa = aa_get_dfa(nulldfa);
1173 
1174 	if (unpack_nameX(e, AA_STRUCT, "data")) {
1175 		info = "out of memory";
1176 		profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
1177 		if (!profile->data)
1178 			goto fail;
1179 
1180 		params.nelem_hint = 3;
1181 		params.key_len = sizeof(void *);
1182 		params.key_offset = offsetof(struct aa_data, key);
1183 		params.head_offset = offsetof(struct aa_data, head);
1184 		params.hashfn = strhash;
1185 		params.obj_cmpfn = datacmp;
1186 
1187 		if (rhashtable_init(profile->data, &params)) {
1188 			info = "failed to init key, value hash table";
1189 			goto fail;
1190 		}
1191 
1192 		while (unpack_strdup(e, &key, NULL)) {
1193 			data = kzalloc(sizeof(*data), GFP_KERNEL);
1194 			if (!data) {
1195 				kfree_sensitive(key);
1196 				goto fail;
1197 			}
1198 
1199 			data->key = key;
1200 			data->size = unpack_blob(e, &data->data, NULL);
1201 			data->data = kvmemdup(data->data, data->size);
1202 			if (data->size && !data->data) {
1203 				kfree_sensitive(data->key);
1204 				kfree_sensitive(data);
1205 				goto fail;
1206 			}
1207 
1208 			rhashtable_insert_fast(profile->data, &data->head,
1209 					       profile->data->p);
1210 		}
1211 
1212 		if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
1213 			info = "failed to unpack end of key, value data table";
1214 			goto fail;
1215 		}
1216 	}
1217 
1218 	if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
1219 		info = "failed to unpack end of profile";
1220 		goto fail;
1221 	}
1222 
1223 	return profile;
1224 
1225 fail:
1226 	if (profile)
1227 		name = NULL;
1228 	else if (!name)
1229 		name = "unknown";
1230 	audit_iface(profile, NULL, name, info, e, error);
1231 	aa_free_profile(profile);
1232 
1233 	return ERR_PTR(error);
1234 }
1235 
1236 /**
1237  * verify_header - unpack serialized stream header
1238  * @e: serialized data read head (NOT NULL)
1239  * @required: whether the header is required or optional
1240  * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
1241  *
1242  * Returns: error or 0 if header is good
1243  */
1244 static int verify_header(struct aa_ext *e, int required, const char **ns)
1245 {
1246 	int error = -EPROTONOSUPPORT;
1247 	const char *name = NULL;
1248 	*ns = NULL;
1249 
1250 	/* get the interface version */
1251 	if (!unpack_u32(e, &e->version, "version")) {
1252 		if (required) {
1253 			audit_iface(NULL, NULL, NULL, "invalid profile format",
1254 				    e, error);
1255 			return error;
1256 		}
1257 	}
1258 
1259 	/* Check that the interface version is currently supported.
1260 	 * If not specified, use the previous version.
1261 	 * Mask off everything that is not the kernel abi version.
1262 	 */
1263 	if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v9)) {
1264 		audit_iface(NULL, NULL, NULL, "unsupported interface version",
1265 			    e, error);
1266 		return error;
1267 	}
1268 
1269 	/* read the namespace if present */
1270 	if (unpack_str(e, &name, "namespace")) {
1271 		if (*name == '\0') {
1272 			audit_iface(NULL, NULL, NULL, "invalid namespace name",
1273 				    e, error);
1274 			return error;
1275 		}
1276 		if (*ns && strcmp(*ns, name)) {
1277 			audit_iface(NULL, NULL, NULL, "invalid ns change", e,
1278 				    error);
1279 		} else if (!*ns) {
1280 			*ns = kstrdup(name, GFP_KERNEL);
1281 			if (!*ns)
1282 				return -ENOMEM;
1283 		}
1284 	}
1285 
1286 	return 0;
1287 }
1288 
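/* check that an AA_X_TABLE xindex references a valid transition table entry */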
1289 static bool verify_xindex(int xindex, int table_size)
1290 {
1291 	int index, xtype;
1292 	xtype = xindex & AA_X_TYPE_MASK;
1293 	index = xindex & AA_X_INDEX_MASK;
1294 	if (xtype == AA_X_TABLE && index >= table_size)
1295 		return false;
1296 	return true;
1297 }
1298 
1299 /* verify dfa xindexes are in range of transition tables */
1300 static bool verify_dfa_xindex(struct aa_dfa *dfa, int table_size)
1301 {
1302 	int i;
1303 	for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
1304 		if (!verify_xindex(ACCEPT_TABLE(dfa)[i], table_size))
1305 			return false;
1306 	}
1307 	return true;
1308 }
1309 
1310 /**
1311  * verify_profile - Do post unpack analysis to verify profile consistency
1312  * @profile: profile to verify (NOT NULL)
1313  *
1314  * Returns: 0 if passes verification else error
1315  *
1316  * This verification is post any unpack mapping or changes
1317  */
1318 static int verify_profile(struct aa_profile *profile)
1319 {
1320 	if ((profile->file.dfa &&
1321 	     !verify_dfa_xindex(profile->file.dfa,
1322 				profile->file.trans.size)) ||
1323 	    (profile->policy.dfa &&
1324 	     !verify_dfa_xindex(profile->policy.dfa,
1325 				profile->policy.trans.size))) {
1326 		audit_iface(profile, NULL, NULL,
1327 			    "Unpack: Invalid named transition", NULL, -EPROTO);
1328 		return -EPROTO;
1329 	}
1330 
1331 	return 0;
1332 }
1333 
1334 void aa_load_ent_free(struct aa_load_ent *ent)
1335 {
1336 	if (ent) {
1337 		aa_put_profile(ent->rename);
1338 		aa_put_profile(ent->old);
1339 		aa_put_profile(ent->new);
1340 		kfree(ent->ns_name);
1341 		kfree_sensitive(ent);
1342 	}
1343 }
1344 
1345 struct aa_load_ent *aa_load_ent_alloc(void)
1346 {
1347 	struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
1348 	if (ent)
1349 		INIT_LIST_HEAD(&ent->list);
1350 	return ent;
1351 }
1352 
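/*
 * compress_zstd - zstd compress @src (@slen bytes) into a newly allocated
 * buffer returned in @dst/@dlen.  When binary export is compiled out this
 * is a no-op that just reports the original length.
 */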
1353 static int compress_zstd(const char *src, size_t slen, char **dst, size_t *dlen)
1354 {
1355 #ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
1356 	const zstd_parameters params =
1357 		zstd_get_params(aa_g_rawdata_compression_level, slen);
1358 	const size_t wksp_len = zstd_cctx_workspace_bound(&params.cParams);
1359 	void *wksp = NULL;
1360 	zstd_cctx *ctx = NULL;
1361 	size_t out_len = zstd_compress_bound(slen);
1362 	void *out = NULL;
1363 	int ret = 0;
1364 
1365 	out = kvzalloc(out_len, GFP_KERNEL);
1366 	if (!out) {
1367 		ret = -ENOMEM;
1368 		goto cleanup;
1369 	}
1370 
1371 	wksp = kvzalloc(wksp_len, GFP_KERNEL);
1372 	if (!wksp) {
1373 		ret = -ENOMEM;
1374 		goto cleanup;
1375 	}
1376 
1377 	ctx = zstd_init_cctx(wksp, wksp_len);
1378 	if (!ctx) {
1379 		ret = -EINVAL;
1380 		goto cleanup;
1381 	}
1382 
1383 	out_len = zstd_compress_cctx(ctx, out, out_len, src, slen, &params);
1384 	if (zstd_is_error(out_len)) {
1385 		ret = -EINVAL;
1386 		goto cleanup;
1387 	}
1388 
1389 	if (is_vmalloc_addr(out)) {
1390 		*dst = kvzalloc(out_len, GFP_KERNEL);
1391 		if (*dst) {
1392 			memcpy(*dst, out, out_len);
1393 			kvfree(out);
1394 			out = NULL;
1395 		}
1396 	} else {
1397 		/*
1398 		 * If the staging buffer was kmalloc'd, then using krealloc is
1399 		 * probably going to be faster. The destination buffer will
1400 		 * always be smaller, so it's just shrunk, avoiding a memcpy
1401 		 */
1402 		*dst = krealloc(out, out_len, GFP_KERNEL);
1403 	}
1404 
1405 	if (!*dst) {
1406 		ret = -ENOMEM;
1407 		goto cleanup;
1408 	}
1409 
1410 	*dlen = out_len;
1411 
1412 cleanup:
1413 	if (ret) {
1414 		kvfree(out);
1415 		*dst = NULL;
1416 	}
1417 
1418 	kvfree(wksp);
1419 	return ret;
1420 #else
1421 	*dlen = slen;
1422 	return 0;
1423 #endif
1424 }
1425 
1426 static int compress_loaddata(struct aa_loaddata *data)
1427 {
1428 	AA_BUG(data->compressed_size > 0);
1429 
1430 	/*
1431 	 * Shortcut the no compression case, else we increase the amount of
1432 	 * storage required by a small amount
1433 	 */
1434 	if (aa_g_rawdata_compression_level != 0) {
1435 		void *udata = data->data;
1436 		int error = compress_zstd(udata, data->size, &data->data,
1437 					  &data->compressed_size);
1438 		if (error)
1439 			return error;
1440 
1441 		if (udata != data->data)
1442 			kvfree(udata);
1443 	} else
1444 		data->compressed_size = data->size;
1445 
1446 	return 0;
1447 }
1448 
1449 /**
1450  * aa_unpack - unpack packed binary profile(s) data loaded from user space
1451  * @udata: user data copied to kmem  (NOT NULL)
1452  * @lh: list to place unpacked profiles in an aa_repl_ws
1453  * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
1454  *
1455  * Unpack user data and return refcounted allocated profile(s) stored in
1456  * @lh in order of discovery, with the list chain stored in base.list,
1457  * or an error.
1458  *
1459  * Returns: 0 with the profile(s) placed on @lh, else an error if unpacking fails
1460  */
1461 int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
1462 	      const char **ns)
1463 {
1464 	struct aa_load_ent *tmp, *ent;
1465 	struct aa_profile *profile = NULL;
1466 	int error;
1467 	struct aa_ext e = {
1468 		.start = udata->data,
1469 		.end = udata->data + udata->size,
1470 		.pos = udata->data,
1471 	};
1472 
1473 	*ns = NULL;
1474 	while (e.pos < e.end) {
1475 		char *ns_name = NULL;
1476 		void *start;
1477 		error = verify_header(&e, e.pos == e.start, ns);
1478 		if (error)
1479 			goto fail;
1480 
1481 		start = e.pos;
1482 		profile = unpack_profile(&e, &ns_name);
1483 		if (IS_ERR(profile)) {
1484 			error = PTR_ERR(profile);
1485 			goto fail;
1486 		}
1487 
1488 		error = verify_profile(profile);
1489 		if (error)
1490 			goto fail_profile;
1491 
1492 		if (aa_g_hash_policy)
1493 			error = aa_calc_profile_hash(profile, e.version, start,
1494 						     e.pos - start);
1495 		if (error)
1496 			goto fail_profile;
1497 
1498 		ent = aa_load_ent_alloc();
1499 		if (!ent) {
1500 			error = -ENOMEM;
1501 			goto fail_profile;
1502 		}
1503 
1504 		ent->new = profile;
1505 		ent->ns_name = ns_name;
1506 		list_add_tail(&ent->list, lh);
1507 	}
1508 	udata->abi = e.version & K_ABI_MASK;
1509 	if (aa_g_hash_policy) {
1510 		udata->hash = aa_calc_hash(udata->data, udata->size);
1511 		if (IS_ERR(udata->hash)) {
1512 			error = PTR_ERR(udata->hash);
1513 			udata->hash = NULL;
1514 			goto fail;
1515 		}
1516 	}
1517 
1518 	if (aa_g_export_binary) {
1519 		error = compress_loaddata(udata);
1520 		if (error)
1521 			goto fail;
1522 	}
1523 	return 0;
1524 
1525 fail_profile:
1526 	aa_put_profile(profile);
1527 
1528 fail:
1529 	list_for_each_entry_safe(ent, tmp, lh, list) {
1530 		list_del_init(&ent->list);
1531 		aa_load_ent_free(ent);
1532 	}
1533 
1534 	return error;
1535 }
1536 
1537 #ifdef CONFIG_SECURITY_APPARMOR_KUNIT_TEST
1538 #include "policy_unpack_test.c"
1539 #endif /* CONFIG_SECURITY_APPARMOR_KUNIT_TEST */
1540