xref: /linux/security/apparmor/policy_unpack.c (revision 6fdcba32711044c35c0e1b094cbd8f3f0b4472c9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * AppArmor security module
4  *
5  * This file contains AppArmor functions for unpacking policy loaded from
6  * userspace.
7  *
8  * Copyright (C) 1998-2008 Novell/SUSE
9  * Copyright 2009-2010 Canonical Ltd.
10  *
11  * AppArmor uses a serialized binary format for loading policy. To find
12  * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
13  * All policy is validated before it is used.
14  */
15 
16 #include <asm/unaligned.h>
17 #include <linux/ctype.h>
18 #include <linux/errno.h>
19 #include <linux/zlib.h>
20 
21 #include "include/apparmor.h"
22 #include "include/audit.h"
23 #include "include/cred.h"
24 #include "include/crypto.h"
25 #include "include/match.h"
26 #include "include/path.h"
27 #include "include/policy.h"
28 #include "include/policy_unpack.h"
29 
30 #define K_ABI_MASK 0x3ff
31 #define FORCE_COMPLAIN_FLAG 0x800
32 #define VERSION_LT(X, Y) (((X) & K_ABI_MASK) < ((Y) & K_ABI_MASK))
33 #define VERSION_GT(X, Y) (((X) & K_ABI_MASK) > ((Y) & K_ABI_MASK))
34 
35 #define v5	5	/* base version */
36 #define v6	6	/* per entry policydb mediation check */
37 #define v7	7
38 #define v8	8	/* full network masking */
39 
40 /*
41  * The AppArmor interface treats data as a type byte followed by the
42  * actual data.  The interface has the notion of a named entry
43  * which has a name (AA_NAME typecode followed by name string) followed by
44  * the entry's typecode and data.  Named types allow for optional
45  * elements and extensions to be added and tested for without breaking
46  * backwards compatibility.
47  */
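/*
 * Illustrative sketch only (derived from the unpack helpers below, not from
 * the policy format docs): a named u32 entry such as "version" would be laid
 * out roughly as
 *
 *	[AA_NAME] [u16 le: 8] "version\0" [AA_U32] [u32 le: value]
 *
 * where the u16 length covers the tag name including its trailing NUL, as
 * read by unpack_u16_chunk() and checked by unpack_nameX().
 */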
48 
49 enum aa_code {
50 	AA_U8,
51 	AA_U16,
52 	AA_U32,
53 	AA_U64,
54 	AA_NAME,		/* same as a string except it is the item's name */
55 	AA_STRING,
56 	AA_BLOB,
57 	AA_STRUCT,
58 	AA_STRUCTEND,
59 	AA_LIST,
60 	AA_LISTEND,
61 	AA_ARRAY,
62 	AA_ARRAYEND,
63 };
64 
65 /*
66  * aa_ext is the read head of the buffer containing the serialized profile.  The
67  * data is copied into a kernel buffer in apparmorfs and then handed off to
68  * the unpack routines.
69  */
70 struct aa_ext {
71 	void *start;
72 	void *end;
73 	void *pos;		/* pointer to current position in the buffer */
74 	u32 version;
75 };
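/*
 * A minimal setup sketch, mirroring aa_unpack() at the bottom of this file:
 *
 *	struct aa_ext e = {
 *		.start = udata->data,
 *		.end = udata->data + udata->size,
 *		.pos = udata->data,
 *	};
 *
 * Each unpack helper advances e.pos on success and resets it on failure.
 */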
76 
77 /* audit callback for unpack fields */
78 static void audit_cb(struct audit_buffer *ab, void *va)
79 {
80 	struct common_audit_data *sa = va;
81 
82 	if (aad(sa)->iface.ns) {
83 		audit_log_format(ab, " ns=");
84 		audit_log_untrustedstring(ab, aad(sa)->iface.ns);
85 	}
86 	if (aad(sa)->name) {
87 		audit_log_format(ab, " name=");
88 		audit_log_untrustedstring(ab, aad(sa)->name);
89 	}
90 	if (aad(sa)->iface.pos)
91 		audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
92 }
93 
94 /**
95  * audit_iface - do audit message for policy unpacking/load/replace/remove
96  * @new: profile if it has been allocated (MAY BE NULL)
97  * @ns_name: name of the ns the profile is to be loaded to (MAY BE NULL)
98  * @name: name of the profile being manipulated (MAY BE NULL)
99  * @info: any extra info about the failure (MAY BE NULL)
100  * @e: buffer position info
101  * @error: error code
102  *
103  * Returns: %0 or error
104  */
105 static int audit_iface(struct aa_profile *new, const char *ns_name,
106 		       const char *name, const char *info, struct aa_ext *e,
107 		       int error)
108 {
109 	struct aa_profile *profile = labels_profile(aa_current_raw_label());
110 	DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, NULL);
111 	if (e)
112 		aad(&sa)->iface.pos = e->pos - e->start;
113 	aad(&sa)->iface.ns = ns_name;
114 	if (new)
115 		aad(&sa)->name = new->base.hname;
116 	else
117 		aad(&sa)->name = name;
118 	aad(&sa)->info = info;
119 	aad(&sa)->error = error;
120 
121 	return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
122 }
123 
124 void __aa_loaddata_update(struct aa_loaddata *data, long revision)
125 {
126 	AA_BUG(!data);
127 	AA_BUG(!data->ns);
128 	AA_BUG(!data->dents[AAFS_LOADDATA_REVISION]);
129 	AA_BUG(!mutex_is_locked(&data->ns->lock));
130 	AA_BUG(data->revision > revision);
131 
132 	data->revision = revision;
133 	d_inode(data->dents[AAFS_LOADDATA_DIR])->i_mtime =
134 		current_time(d_inode(data->dents[AAFS_LOADDATA_DIR]));
135 	d_inode(data->dents[AAFS_LOADDATA_REVISION])->i_mtime =
136 		current_time(d_inode(data->dents[AAFS_LOADDATA_REVISION]));
137 }
138 
139 bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
140 {
141 	if (l->size != r->size)
142 		return false;
143 	if (l->compressed_size != r->compressed_size)
144 		return false;
145 	if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
146 		return false;
147 	return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
148 }
149 
150 /*
151  * need to take the ns mutex lock, which is NOT safe in most places that
152  * put_loaddata is called from, so we have to defer freeing to a workqueue
153  */
154 static void do_loaddata_free(struct work_struct *work)
155 {
156 	struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
157 	struct aa_ns *ns = aa_get_ns(d->ns);
158 
159 	if (ns) {
160 		mutex_lock_nested(&ns->lock, ns->level);
161 		__aa_fs_remove_rawdata(d);
162 		mutex_unlock(&ns->lock);
163 		aa_put_ns(ns);
164 	}
165 
166 	kzfree(d->hash);
167 	kzfree(d->name);
168 	kvfree(d->data);
169 	kzfree(d);
170 }
171 
172 void aa_loaddata_kref(struct kref *kref)
173 {
174 	struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);
175 
176 	if (d) {
177 		INIT_WORK(&d->work, do_loaddata_free);
178 		schedule_work(&d->work);
179 	}
180 }
181 
182 struct aa_loaddata *aa_loaddata_alloc(size_t size)
183 {
184 	struct aa_loaddata *d;
185 
186 	d = kzalloc(sizeof(*d), GFP_KERNEL);
187 	if (d == NULL)
188 		return ERR_PTR(-ENOMEM);
189 	d->data = kvzalloc(size, GFP_KERNEL);
190 	if (!d->data) {
191 		kfree(d);
192 		return ERR_PTR(-ENOMEM);
193 	}
194 	kref_init(&d->count);
195 	INIT_LIST_HEAD(&d->list);
196 
197 	return d;
198 }
199 
200 /* test if read will be in packed data bounds */
201 static bool inbounds(struct aa_ext *e, size_t size)
202 {
203 	return (size <= e->end - e->pos);
204 }
205 
206 static void *kvmemdup(const void *src, size_t len)
207 {
208 	void *p = kvmalloc(len, GFP_KERNEL);
209 
210 	if (p)
211 		memcpy(p, src, len);
212 	return p;
213 }
214 
215 /**
216  * unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
217  * @e: serialized data read head (NOT NULL)
218  * @chunk: start address for chunk of data (NOT NULL)
219  *
220  * Returns: the size of chunk found with the read head at the end of the chunk.
221  */
222 static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
223 {
224 	size_t size = 0;
225 	void *pos = e->pos;
226 
227 	if (!inbounds(e, sizeof(u16)))
228 		goto fail;
229 	size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
230 	e->pos += sizeof(__le16);
231 	if (!inbounds(e, size))
232 		goto fail;
233 	*chunk = e->pos;
234 	e->pos += size;
235 	return size;
236 
237 fail:
238 	e->pos = pos;
239 	return 0;
240 }
241 
242 /* unpack control byte */
243 static bool unpack_X(struct aa_ext *e, enum aa_code code)
244 {
245 	if (!inbounds(e, 1))
246 		return 0;
247 	if (*(u8 *) e->pos != code)
248 		return 0;
249 	e->pos++;
250 	return 1;
251 }
252 
253 /**
254  * unpack_nameX - check if the next element is of type X with a name of @name
255  * @e: serialized data extent information  (NOT NULL)
256  * @code: type code
257  * @name: name to match to the serialized element.  (MAYBE NULL)
258  *
259  * check that the next serialized data element is of type X and has a tag
260  * name @name.  If @name is specified then there must be a matching
261  * name element in the stream.  If @name is NULL any name element will be
262  * skipped and only the typecode will be tested.
263  *
264  * Returns 1 on success (both type code and name tests match) and the read
265  * head is advanced past the headers
266  *
267  * Returns: 0 if either match fails, the read head does not move
268  */
269 static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
270 {
271 	/*
272 	 * May need to reset pos if name or type doesn't match
273 	 */
274 	void *pos = e->pos;
275 	/*
276 	 * Check for presence of a tagname, and if present name size
277 	 * AA_NAME tag value is a u16.
278 	 */
279 	if (unpack_X(e, AA_NAME)) {
280 		char *tag = NULL;
281 		size_t size = unpack_u16_chunk(e, &tag);
282 		/* if a name is specified it must match. otherwise skip tag */
283 		if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
284 			goto fail;
285 	} else if (name) {
286 		/* if a name is specified and there is no name tag fail */
287 		goto fail;
288 	}
289 
290 	/* now check if type code matches */
291 	if (unpack_X(e, code))
292 		return 1;
293 
294 fail:
295 	e->pos = pos;
296 	return 0;
297 }
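/*
 * Typical caller pattern for an optional named element, sketched from
 * unpack_xattrs()/unpack_rlimits() below; a missing named struct is not an
 * error, only a malformed one is:
 *
 *	if (unpack_nameX(e, AA_STRUCT, "xattrs")) {
 *		... unpack the members ...
 *		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
 *			goto fail;
 *		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
 *			goto fail;
 *	}
 */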
298 
299 static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
300 {
301 	void *pos = e->pos;
302 
303 	if (unpack_nameX(e, AA_U8, name)) {
304 		if (!inbounds(e, sizeof(u8)))
305 			goto fail;
306 		if (data)
307 			*data = get_unaligned((u8 *)e->pos);
308 		e->pos += sizeof(u8);
309 		return 1;
310 	}
311 
312 fail:
313 	e->pos = pos;
314 	return 0;
315 }
316 
317 static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
318 {
319 	void *pos = e->pos;
320 
321 	if (unpack_nameX(e, AA_U32, name)) {
322 		if (!inbounds(e, sizeof(u32)))
323 			goto fail;
324 		if (data)
325 			*data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
326 		e->pos += sizeof(u32);
327 		return 1;
328 	}
329 
330 fail:
331 	e->pos = pos;
332 	return 0;
333 }
334 
335 static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
336 {
337 	void *pos = e->pos;
338 
339 	if (unpack_nameX(e, AA_U64, name)) {
340 		if (!inbounds(e, sizeof(u64)))
341 			goto fail;
342 		if (data)
343 			*data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
344 		e->pos += sizeof(u64);
345 		return 1;
346 	}
347 
348 fail:
349 	e->pos = pos;
350 	return 0;
351 }
352 
353 static size_t unpack_array(struct aa_ext *e, const char *name)
354 {
355 	void *pos = e->pos;
356 
357 	if (unpack_nameX(e, AA_ARRAY, name)) {
358 		int size;
359 		if (!inbounds(e, sizeof(u16)))
360 			goto fail;
361 		size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
362 		e->pos += sizeof(u16);
363 		return size;
364 	}
365 
366 fail:
367 	e->pos = pos;
368 	return 0;
369 }
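/*
 * Sketch of the array encoding consumed above and by its callers: an
 * optional AA_NAME tag, the AA_ARRAY code, a u16 le element count, the
 * elements themselves, and a closing AA_ARRAYEND that the caller checks
 * once it has unpacked the elements.
 */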
370 
371 static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
372 {
373 	void *pos = e->pos;
374 
375 	if (unpack_nameX(e, AA_BLOB, name)) {
376 		u32 size;
377 		if (!inbounds(e, sizeof(u32)))
378 			goto fail;
379 		size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
380 		e->pos += sizeof(u32);
381 		if (inbounds(e, (size_t) size)) {
382 			*blob = e->pos;
383 			e->pos += size;
384 			return size;
385 		}
386 	}
387 
388 fail:
389 	e->pos = pos;
390 	return 0;
391 }
392 
393 static int unpack_str(struct aa_ext *e, const char **string, const char *name)
394 {
395 	char *src_str;
396 	size_t size = 0;
397 	void *pos = e->pos;
398 	*string = NULL;
399 	if (unpack_nameX(e, AA_STRING, name)) {
400 		size = unpack_u16_chunk(e, &src_str);
401 		if (size) {
402 			/* strings are null terminated, length is size - 1 */
403 			if (src_str[size - 1] != 0)
404 				goto fail;
405 			*string = src_str;
406 
407 			return size;
408 		}
409 	}
410 
411 fail:
412 	e->pos = pos;
413 	return 0;
414 }
415 
416 static int unpack_strdup(struct aa_ext *e, char **string, const char *name)
417 {
418 	const char *tmp;
419 	void *pos = e->pos;
420 	int res = unpack_str(e, &tmp, name);
421 	*string = NULL;
422 
423 	if (!res)
424 		return 0;
425 
426 	*string = kmemdup(tmp, res, GFP_KERNEL);
427 	if (!*string) {
428 		e->pos = pos;
429 		return 0;
430 	}
431 
432 	return res;
433 }
434 
435 
436 /**
437  * unpack_dfa - unpack a file rule dfa
438  * @e: serialized data extent information (NOT NULL)
439  *
440  * Returns: the unpacked dfa, an ERR_PTR on failure, or NULL if there is no dfa
441  */
442 static struct aa_dfa *unpack_dfa(struct aa_ext *e)
443 {
444 	char *blob = NULL;
445 	size_t size;
446 	struct aa_dfa *dfa = NULL;
447 
448 	size = unpack_blob(e, &blob, "aadfa");
449 	if (size) {
450 		/*
451 		 * The dfa is aligned within the blob to 8 bytes
452 		 * from the beginning of the stream.
453 		 * Compute the alignment adjustment needed by the dfa unpack.
454 		 */
455 		size_t sz = blob - (char *) e->start -
456 			((e->pos - e->start) & 7);
457 		size_t pad = ALIGN(sz, 8) - sz;
458 		int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
459 			TO_ACCEPT2_FLAG(YYTD_DATA32) | DFA_FLAG_VERIFY_STATES;
460 		dfa = aa_dfa_unpack(blob + pad, size - pad, flags);
461 
462 		if (IS_ERR(dfa))
463 			return dfa;
464 
465 	}
466 
467 	return dfa;
468 }
469 
470 /**
471  * unpack_trans_table - unpack a profile transition table
472  * @e: serialized data extent information  (NOT NULL)
473  * @profile: profile to add the accept table to (NOT NULL)
474  *
475  * Returns: 1 if table successfully unpacked
476  */
477 static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
478 {
479 	void *saved_pos = e->pos;
480 
481 	/* exec table is optional */
482 	if (unpack_nameX(e, AA_STRUCT, "xtable")) {
483 		int i, size;
484 
485 		size = unpack_array(e, NULL);
486 		/* currently 4 exec bits and entries 0-3 are reserved iupcx */
487 		if (size > 16 - 4)
488 			goto fail;
489 		profile->file.trans.table = kcalloc(size, sizeof(char *),
490 						    GFP_KERNEL);
491 		if (!profile->file.trans.table)
492 			goto fail;
493 
494 		profile->file.trans.size = size;
495 		for (i = 0; i < size; i++) {
496 			char *str;
497 			int c, j, pos, size2 = unpack_strdup(e, &str, NULL);
498 			/* unpack_strdup verifies that the last character is
499 			 * the null termination byte.
500 			 */
501 			if (!size2)
502 				goto fail;
503 			profile->file.trans.table[i] = str;
504 			/* verify that name doesn't start with space */
505 			if (isspace(*str))
506 				goto fail;
507 
508 			/* count the number of internal \0 characters */
509 			for (c = j = 0; j < size2 - 1; j++) {
510 				if (!str[j]) {
511 					pos = j;
512 					c++;
513 				}
514 			}
515 			if (*str == ':') {
516 				/* first character after : must be valid */
517 				if (!str[1])
518 					goto fail;
519 				/* beginning with : requires an embedded \0,
520 				 * verify that exactly 1 internal \0 exists
521 				 * trailing \0 already verified by unpack_strdup
522 				 *
523 				 * convert \0 back to : for label_parse
524 				 */
525 				if (c == 1)
526 					str[pos] = ':';
527 				else if (c > 1)
528 					goto fail;
529 			} else if (c)
530 				/* fail - all other cases with embedded \0 */
531 				goto fail;
532 		}
533 		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
534 			goto fail;
535 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
536 			goto fail;
537 	}
538 	return 1;
539 
540 fail:
541 	aa_free_domain_entries(&profile->file.trans);
542 	e->pos = saved_pos;
543 	return 0;
544 }
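/*
 * Illustrative example of the ':' handling above (assumed encoding): an
 * xtable entry that names a profile in another namespace arrives as a
 * string like ":ns\0profile" - it must begin with ':' and contain exactly
 * one internal \0, which is converted back to ':' so the entry reads
 * ":ns:profile" when it is later parsed as a label.
 */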
545 
546 static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
547 {
548 	void *pos = e->pos;
549 
550 	if (unpack_nameX(e, AA_STRUCT, "xattrs")) {
551 		int i, size;
552 
553 		size = unpack_array(e, NULL);
554 		profile->xattr_count = size;
555 		profile->xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
556 		if (!profile->xattrs)
557 			goto fail;
558 		for (i = 0; i < size; i++) {
559 			if (!unpack_strdup(e, &profile->xattrs[i], NULL))
560 				goto fail;
561 		}
562 		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
563 			goto fail;
564 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
565 			goto fail;
566 	}
567 
568 	return 1;
569 
570 fail:
571 	e->pos = pos;
572 	return 0;
573 }
574 
575 static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
576 {
577 	void *pos = e->pos;
578 	int i, size;
579 
580 	if (unpack_nameX(e, AA_STRUCT, "secmark")) {
581 		size = unpack_array(e, NULL);
582 
583 		profile->secmark = kcalloc(size, sizeof(struct aa_secmark),
584 					   GFP_KERNEL);
585 		if (!profile->secmark)
586 			goto fail;
587 
588 		profile->secmark_count = size;
589 
590 		for (i = 0; i < size; i++) {
591 			if (!unpack_u8(e, &profile->secmark[i].audit, NULL))
592 				goto fail;
593 			if (!unpack_u8(e, &profile->secmark[i].deny, NULL))
594 				goto fail;
595 			if (!unpack_strdup(e, &profile->secmark[i].label, NULL))
596 				goto fail;
597 		}
598 		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
599 			goto fail;
600 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
601 			goto fail;
602 	}
603 
604 	return 1;
605 
606 fail:
607 	if (profile->secmark) {
608 		for (i = 0; i < size; i++)
609 			kfree(profile->secmark[i].label);
610 		kfree(profile->secmark);
611 		profile->secmark_count = 0;
612 		profile->secmark = NULL;
613 	}
614 
615 	e->pos = pos;
616 	return 0;
617 }
618 
619 static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
620 {
621 	void *pos = e->pos;
622 
623 	/* rlimits are optional */
624 	if (unpack_nameX(e, AA_STRUCT, "rlimits")) {
625 		int i, size;
626 		u32 tmp = 0;
627 		if (!unpack_u32(e, &tmp, NULL))
628 			goto fail;
629 		profile->rlimits.mask = tmp;
630 
631 		size = unpack_array(e, NULL);
632 		if (size > RLIM_NLIMITS)
633 			goto fail;
634 		for (i = 0; i < size; i++) {
635 			u64 tmp2 = 0;
636 			int a = aa_map_resource(i);
637 			if (!unpack_u64(e, &tmp2, NULL))
638 				goto fail;
639 			profile->rlimits.limits[a].rlim_max = tmp2;
640 		}
641 		if (!unpack_nameX(e, AA_ARRAYEND, NULL))
642 			goto fail;
643 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
644 			goto fail;
645 	}
646 	return 1;
647 
648 fail:
649 	e->pos = pos;
650 	return 0;
651 }
652 
653 static u32 strhash(const void *data, u32 len, u32 seed)
654 {
655 	const char * const *key = data;
656 
657 	return jhash(*key, strlen(*key), seed);
658 }
659 
660 static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
661 {
662 	const struct aa_data *data = obj;
663 	const char * const *key = arg->key;
664 
665 	return strcmp(data->key, *key);
666 }
667 
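/*
 * Rough order of elements handled by unpack_profile() below, for
 * orientation only (bracketed elements are optional): name, [rename],
 * [attach], [xmatch + xmatch len], [disconnected], flags, [path_flags],
 * caps, [caps64], [capsx], [xattrs], [rlimits], [secmark], [policydb],
 * [file dfa + dfa_start], [xtable], [data].
 */
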
668 /**
669  * unpack_profile - unpack a serialized profile
670  * @e: serialized data extent information (NOT NULL)
671  *
672  * NOTE: unpack_profile() fills in the audit data and logs via audit_iface() if there is a failure
673  */
674 static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
675 {
676 	struct aa_profile *profile = NULL;
677 	const char *tmpname, *tmpns = NULL, *name = NULL;
678 	const char *info = "failed to unpack profile";
679 	size_t ns_len;
680 	struct rhashtable_params params = { 0 };
681 	char *key = NULL;
682 	struct aa_data *data;
683 	int i, error = -EPROTO;
684 	kernel_cap_t tmpcap;
685 	u32 tmp;
686 
687 	*ns_name = NULL;
688 
689 	/* check that we have the right struct being passed */
690 	if (!unpack_nameX(e, AA_STRUCT, "profile"))
691 		goto fail;
692 	if (!unpack_str(e, &name, NULL))
693 		goto fail;
694 	if (*name == '\0')
695 		goto fail;
696 
697 	tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
698 	if (tmpns) {
699 		*ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
700 		if (!*ns_name) {
701 			info = "out of memory";
702 			goto fail;
703 		}
704 		name = tmpname;
705 	}
706 
707 	profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
708 	if (!profile)
709 		return ERR_PTR(-ENOMEM);
710 
711 	/* profile renaming is optional */
712 	(void) unpack_str(e, &profile->rename, "rename");
713 
714 	/* attachment string is optional */
715 	(void) unpack_str(e, &profile->attach, "attach");
716 
717 	/* xmatch is optional and may be NULL */
718 	profile->xmatch = unpack_dfa(e);
719 	if (IS_ERR(profile->xmatch)) {
720 		error = PTR_ERR(profile->xmatch);
721 		profile->xmatch = NULL;
722 		info = "bad xmatch";
723 		goto fail;
724 	}
725 	/* xmatch_len is not optional if xmatch is set */
726 	if (profile->xmatch) {
727 		if (!unpack_u32(e, &tmp, NULL)) {
728 			info = "missing xmatch len";
729 			goto fail;
730 		}
731 		profile->xmatch_len = tmp;
732 	}
733 
734 	/* disconnected attachment string is optional */
735 	(void) unpack_str(e, &profile->disconnected, "disconnected");
736 
737 	/* per profile debug flags (complain, audit) */
738 	if (!unpack_nameX(e, AA_STRUCT, "flags")) {
739 		info = "profile missing flags";
740 		goto fail;
741 	}
742 	info = "failed to unpack profile flags";
743 	if (!unpack_u32(e, &tmp, NULL))
744 		goto fail;
745 	if (tmp & PACKED_FLAG_HAT)
746 		profile->label.flags |= FLAG_HAT;
747 	if (!unpack_u32(e, &tmp, NULL))
748 		goto fail;
749 	if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG))
750 		profile->mode = APPARMOR_COMPLAIN;
751 	else if (tmp == PACKED_MODE_KILL)
752 		profile->mode = APPARMOR_KILL;
753 	else if (tmp == PACKED_MODE_UNCONFINED)
754 		profile->mode = APPARMOR_UNCONFINED;
755 	if (!unpack_u32(e, &tmp, NULL))
756 		goto fail;
757 	if (tmp)
758 		profile->audit = AUDIT_ALL;
759 
760 	if (!unpack_nameX(e, AA_STRUCTEND, NULL))
761 		goto fail;
762 
763 	/* path_flags is optional */
764 	if (unpack_u32(e, &profile->path_flags, "path_flags"))
765 		profile->path_flags |= profile->label.flags &
766 			PATH_MEDIATE_DELETED;
767 	else
768 		/* set a default value if path_flags field is not present */
769 		profile->path_flags = PATH_MEDIATE_DELETED;
770 
771 	info = "failed to unpack profile capabilities";
772 	if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
773 		goto fail;
774 	if (!unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
775 		goto fail;
776 	if (!unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
777 		goto fail;
778 	if (!unpack_u32(e, &tmpcap.cap[0], NULL))
779 		goto fail;
780 
781 	info = "failed to unpack upper profile capabilities";
782 	if (unpack_nameX(e, AA_STRUCT, "caps64")) {
783 		/* optional upper half of 64 bit caps */
784 		if (!unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
785 			goto fail;
786 		if (!unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
787 			goto fail;
788 		if (!unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
789 			goto fail;
790 		if (!unpack_u32(e, &(tmpcap.cap[1]), NULL))
791 			goto fail;
792 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
793 			goto fail;
794 	}
795 
796 	info = "failed to unpack extended profile capabilities";
797 	if (unpack_nameX(e, AA_STRUCT, "capsx")) {
798 		/* optional extended caps mediation mask */
799 		if (!unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
800 			goto fail;
801 		if (!unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
802 			goto fail;
803 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
804 			goto fail;
805 	}
806 
807 	if (!unpack_xattrs(e, profile)) {
808 		info = "failed to unpack profile xattrs";
809 		goto fail;
810 	}
811 
812 	if (!unpack_rlimits(e, profile)) {
813 		info = "failed to unpack profile rlimits";
814 		goto fail;
815 	}
816 
817 	if (!unpack_secmark(e, profile)) {
818 		info = "failed to unpack profile secmark rules";
819 		goto fail;
820 	}
821 
822 	if (unpack_nameX(e, AA_STRUCT, "policydb")) {
823 		/* generic policy dfa - optional and may be NULL */
824 		info = "failed to unpack policydb";
825 		profile->policy.dfa = unpack_dfa(e);
826 		if (IS_ERR(profile->policy.dfa)) {
827 			error = PTR_ERR(profile->policy.dfa);
828 			profile->policy.dfa = NULL;
829 			goto fail;
830 		} else if (!profile->policy.dfa) {
831 			error = -EPROTO;
832 			goto fail;
833 		}
834 		if (!unpack_u32(e, &profile->policy.start[0], "start"))
835 			/* default start state */
836 			profile->policy.start[0] = DFA_START;
837 		/* setup class index */
838 		for (i = AA_CLASS_FILE; i <= AA_CLASS_LAST; i++) {
839 			profile->policy.start[i] =
840 				aa_dfa_next(profile->policy.dfa,
841 					    profile->policy.start[0],
842 					    i);
843 		}
844 		if (!unpack_nameX(e, AA_STRUCTEND, NULL))
845 			goto fail;
846 	} else
847 		profile->policy.dfa = aa_get_dfa(nulldfa);
848 
849 	/* get file rules */
850 	profile->file.dfa = unpack_dfa(e);
851 	if (IS_ERR(profile->file.dfa)) {
852 		error = PTR_ERR(profile->file.dfa);
853 		profile->file.dfa = NULL;
854 		info = "failed to unpack profile file rules";
855 		goto fail;
856 	} else if (profile->file.dfa) {
857 		if (!unpack_u32(e, &profile->file.start, "dfa_start"))
858 			/* default start state */
859 			profile->file.start = DFA_START;
860 	} else if (profile->policy.dfa &&
861 		   profile->policy.start[AA_CLASS_FILE]) {
862 		profile->file.dfa = aa_get_dfa(profile->policy.dfa);
863 		profile->file.start = profile->policy.start[AA_CLASS_FILE];
864 	} else
865 		profile->file.dfa = aa_get_dfa(nulldfa);
866 
867 	if (!unpack_trans_table(e, profile)) {
868 		info = "failed to unpack profile transition table";
869 		goto fail;
870 	}
871 
872 	if (unpack_nameX(e, AA_STRUCT, "data")) {
873 		info = "out of memory";
874 		profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
875 		if (!profile->data)
876 			goto fail;
877 
878 		params.nelem_hint = 3;
879 		params.key_len = sizeof(void *);
880 		params.key_offset = offsetof(struct aa_data, key);
881 		params.head_offset = offsetof(struct aa_data, head);
882 		params.hashfn = strhash;
883 		params.obj_cmpfn = datacmp;
884 
885 		if (rhashtable_init(profile->data, &params)) {
886 			info = "failed to init key, value hash table";
887 			goto fail;
888 		}
889 
890 		while (unpack_strdup(e, &key, NULL)) {
891 			data = kzalloc(sizeof(*data), GFP_KERNEL);
892 			if (!data) {
893 				kzfree(key);
894 				goto fail;
895 			}
896 
897 			data->key = key;
898 			data->size = unpack_blob(e, &data->data, NULL);
899 			data->data = kvmemdup(data->data, data->size);
900 			if (data->size && !data->data) {
901 				kzfree(data->key);
902 				kzfree(data);
903 				goto fail;
904 			}
905 
906 			rhashtable_insert_fast(profile->data, &data->head,
907 					       profile->data->p);
908 		}
909 
910 		if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
911 			info = "failed to unpack end of key, value data table";
912 			goto fail;
913 		}
914 	}
915 
916 	if (!unpack_nameX(e, AA_STRUCTEND, NULL)) {
917 		info = "failed to unpack end of profile";
918 		goto fail;
919 	}
920 
921 	return profile;
922 
923 fail:
924 	if (profile)
925 		name = NULL;
926 	else if (!name)
927 		name = "unknown";
928 	audit_iface(profile, NULL, name, info, e, error);
929 	aa_free_profile(profile);
930 
931 	return ERR_PTR(error);
932 }
933 
934 /**
935  * verify_header - unpack serialized stream header
936  * @e: serialized data read head (NOT NULL)
937  * @required: whether the header is required or optional
938  * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
939  *
940  * Returns: error or 0 if header is good
941  */
942 static int verify_header(struct aa_ext *e, int required, const char **ns)
943 {
944 	int error = -EPROTONOSUPPORT;
945 	const char *name = NULL;
946 	*ns = NULL;
947 
948 	/* get the interface version */
949 	if (!unpack_u32(e, &e->version, "version")) {
950 		if (required) {
951 			audit_iface(NULL, NULL, NULL, "invalid profile format",
952 				    e, error);
953 			return error;
954 		}
955 	}
956 
957 	/* Check that the interface version is currently supported.
958 	 * If not specified, the previous version is used.
959 	 * VERSION_LT/GT mask off everything that is not the kernel abi version.
960 	 */
961 	if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) {
962 		audit_iface(NULL, NULL, NULL, "unsupported interface version",
963 			    e, error);
964 		return error;
965 	}
966 
967 	/* read the namespace if present */
968 	if (unpack_str(e, &name, "namespace")) {
969 		if (*name == '\0') {
970 			audit_iface(NULL, NULL, NULL, "invalid namespace name",
971 				    e, error);
972 			return error;
973 		}
974 		if (*ns && strcmp(*ns, name)) {
975 			audit_iface(NULL, NULL, NULL, "invalid ns change", e,
976 				    error);
977 		} else if (!*ns) {
978 			*ns = kstrdup(name, GFP_KERNEL);
979 			if (!*ns)
980 				return -ENOMEM;
981 		}
982 	}
983 
984 	return 0;
985 }
986 
987 static bool verify_xindex(int xindex, int table_size)
988 {
989 	int index, xtype;
990 	xtype = xindex & AA_X_TYPE_MASK;
991 	index = xindex & AA_X_INDEX_MASK;
992 	if (xtype == AA_X_TABLE && index >= table_size)
993 		return 0;
994 	return 1;
995 }
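/*
 * Sketch of the check above: an xindex packs a type (AA_X_TYPE_MASK bits)
 * and an index (AA_X_INDEX_MASK bits); only AA_X_TABLE entries reference
 * the profile's transition table (file.trans.table), so only those need a
 * bounds check against the table size.
 */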
996 
997 /* verify dfa xindexes are in range of transition tables */
998 static bool verify_dfa_xindex(struct aa_dfa *dfa, int table_size)
999 {
1000 	int i;
1001 	for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
1002 		if (!verify_xindex(dfa_user_xindex(dfa, i), table_size))
1003 			return 0;
1004 		if (!verify_xindex(dfa_other_xindex(dfa, i), table_size))
1005 			return 0;
1006 	}
1007 	return 1;
1008 }
1009 
1010 /**
1011  * verify_profile - Do post unpack analysis to verify profile consistency
1012  * @profile: profile to verify (NOT NULL)
1013  *
1014  * Returns: 0 if passes verification else error
1015  */
1016 static int verify_profile(struct aa_profile *profile)
1017 {
1018 	if (profile->file.dfa &&
1019 	    !verify_dfa_xindex(profile->file.dfa,
1020 			       profile->file.trans.size)) {
1021 		audit_iface(profile, NULL, NULL, "Invalid named transition",
1022 			    NULL, -EPROTO);
1023 		return -EPROTO;
1024 	}
1025 
1026 	return 0;
1027 }
1028 
1029 void aa_load_ent_free(struct aa_load_ent *ent)
1030 {
1031 	if (ent) {
1032 		aa_put_profile(ent->rename);
1033 		aa_put_profile(ent->old);
1034 		aa_put_profile(ent->new);
1035 		kfree(ent->ns_name);
1036 		kzfree(ent);
1037 	}
1038 }
1039 
1040 struct aa_load_ent *aa_load_ent_alloc(void)
1041 {
1042 	struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
1043 	if (ent)
1044 		INIT_LIST_HEAD(&ent->list);
1045 	return ent;
1046 }
1047 
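/*
 * Rough flow of the zlib compression below, for reference: allocate a
 * deflate workspace, zlib_deflateInit() at aa_g_rawdata_compression_level,
 * run a single zlib_deflate(Z_FINISH) pass into a staging buffer sized by
 * deflateBound(), then shrink (krealloc) or copy (kvzalloc + memcpy) the
 * result into a right-sized buffer returned via *dst and *dlen.
 */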
1048 static int deflate_compress(const char *src, size_t slen, char **dst,
1049 			    size_t *dlen)
1050 {
1051 	int error;
1052 	struct z_stream_s strm;
1053 	void *stgbuf, *dstbuf;
1054 	size_t stglen = deflateBound(slen);
1055 
1056 	memset(&strm, 0, sizeof(strm));
1057 
1058 	if (stglen < slen)
1059 		return -EFBIG;
1060 
1061 	strm.workspace = kvzalloc(zlib_deflate_workspacesize(MAX_WBITS,
1062 							     MAX_MEM_LEVEL),
1063 				  GFP_KERNEL);
1064 	if (!strm.workspace)
1065 		return -ENOMEM;
1066 
1067 	error = zlib_deflateInit(&strm, aa_g_rawdata_compression_level);
1068 	if (error != Z_OK) {
1069 		error = -ENOMEM;
1070 		goto fail_deflate_init;
1071 	}
1072 
1073 	stgbuf = kvzalloc(stglen, GFP_KERNEL);
1074 	if (!stgbuf) {
1075 		error = -ENOMEM;
1076 		goto fail_stg_alloc;
1077 	}
1078 
1079 	strm.next_in = src;
1080 	strm.avail_in = slen;
1081 	strm.next_out = stgbuf;
1082 	strm.avail_out = stglen;
1083 
1084 	error = zlib_deflate(&strm, Z_FINISH);
1085 	if (error != Z_STREAM_END) {
1086 		error = -EINVAL;
1087 		goto fail_deflate;
1088 	}
1089 	error = 0;
1090 
1091 	if (is_vmalloc_addr(stgbuf)) {
1092 		dstbuf = kvzalloc(strm.total_out, GFP_KERNEL);
1093 		if (dstbuf) {
1094 			memcpy(dstbuf, stgbuf, strm.total_out);
1095 			kvfree(stgbuf);
1096 		}
1097 	} else
1098 		/*
1099 		 * If the staging buffer was kmalloc'd, then using krealloc is
1100 		 * probably going to be faster. The destination buffer will
1101 		 * always be smaller, so it's just shrunk, avoiding a memcpy
1102 		 */
1103 		dstbuf = krealloc(stgbuf, strm.total_out, GFP_KERNEL);
1104 
1105 	if (!dstbuf) {
1106 		error = -ENOMEM;
1107 		goto fail_deflate;
1108 	}
1109 
1110 	*dst = dstbuf;
1111 	*dlen = strm.total_out;
1112 
1113 fail_stg_alloc:
1114 	zlib_deflateEnd(&strm);
1115 fail_deflate_init:
1116 	kvfree(strm.workspace);
1117 	return error;
1118 
1119 fail_deflate:
1120 	kvfree(stgbuf);
1121 	goto fail_stg_alloc;
1122 }
1123 
1124 static int compress_loaddata(struct aa_loaddata *data)
1125 {
1126 
1127 	AA_BUG(data->compressed_size > 0);
1128 
1129 	/*
1130 	 * Shortcut the no compression case, else we increase the amount of
1131 	 * storage required by a small amount
1132 	 */
1133 	if (aa_g_rawdata_compression_level != 0) {
1134 		void *udata = data->data;
1135 		int error = deflate_compress(udata, data->size, &data->data,
1136 					     &data->compressed_size);
1137 		if (error)
1138 			return error;
1139 
1140 		kvfree(udata);
1141 	} else
1142 		data->compressed_size = data->size;
1143 
1144 	return 0;
1145 }
1146 
1147 /**
1148  * aa_unpack - unpack packed binary profile(s) data loaded from user space
1149  * @udata: user data copied to kmem  (NOT NULL)
1150  * @lh: list to place unpacked profiles in an aa_repl_ws
1151  * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
1152  *
1153  * Unpack user data and return refcounted allocated profile(s) stored in
1154  * @lh in order of discovery, with the list chain stored in base.list,
1155  * or an error if unpacking fails.
1156  *
1157  * Returns: 0 with the unpacked profile(s) on @lh, else a negative error
1158  */
1159 int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
1160 	      const char **ns)
1161 {
1162 	struct aa_load_ent *tmp, *ent;
1163 	struct aa_profile *profile = NULL;
1164 	int error;
1165 	struct aa_ext e = {
1166 		.start = udata->data,
1167 		.end = udata->data + udata->size,
1168 		.pos = udata->data,
1169 	};
1170 
1171 	*ns = NULL;
1172 	while (e.pos < e.end) {
1173 		char *ns_name = NULL;
1174 		void *start;
1175 		error = verify_header(&e, e.pos == e.start, ns);
1176 		if (error)
1177 			goto fail;
1178 
1179 		start = e.pos;
1180 		profile = unpack_profile(&e, &ns_name);
1181 		if (IS_ERR(profile)) {
1182 			error = PTR_ERR(profile);
1183 			goto fail;
1184 		}
1185 
1186 		error = verify_profile(profile);
1187 		if (error)
1188 			goto fail_profile;
1189 
1190 		if (aa_g_hash_policy)
1191 			error = aa_calc_profile_hash(profile, e.version, start,
1192 						     e.pos - start);
1193 		if (error)
1194 			goto fail_profile;
1195 
1196 		ent = aa_load_ent_alloc();
1197 		if (!ent) {
1198 			error = -ENOMEM;
1199 			goto fail_profile;
1200 		}
1201 
1202 		ent->new = profile;
1203 		ent->ns_name = ns_name;
1204 		list_add_tail(&ent->list, lh);
1205 	}
1206 	udata->abi = e.version & K_ABI_MASK;
1207 	if (aa_g_hash_policy) {
1208 		udata->hash = aa_calc_hash(udata->data, udata->size);
1209 		if (IS_ERR(udata->hash)) {
1210 			error = PTR_ERR(udata->hash);
1211 			udata->hash = NULL;
1212 			goto fail;
1213 		}
1214 	}
1215 	error = compress_loaddata(udata);
1216 	if (error)
1217 		goto fail;
1218 	return 0;
1219 
1220 fail_profile:
1221 	aa_put_profile(profile);
1222 
1223 fail:
1224 	list_for_each_entry_safe(ent, tmp, lh, list) {
1225 		list_del_init(&ent->list);
1226 		aa_load_ent_free(ent);
1227 	}
1228 
1229 	return error;
1230 }
1231