// SPDX-License-Identifier: GPL-2.0-only
/*
 * AppArmor security module
 *
 * This file contains AppArmor functions for unpacking policy loaded from
 * userspace.
 *
 * Copyright (C) 1998-2008 Novell/SUSE
 * Copyright 2009-2010 Canonical Ltd.
 *
 * AppArmor uses a serialized binary format for loading policy. To find
 * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
 * All policy is validated before it is used.
 */

#include <asm/unaligned.h>
#include <kunit/visibility.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/zstd.h>

#include "include/apparmor.h"
#include "include/audit.h"
#include "include/cred.h"
#include "include/crypto.h"
#include "include/file.h"
#include "include/match.h"
#include "include/path.h"
#include "include/policy.h"
#include "include/policy_unpack.h"
#include "include/policy_compat.h"

/* audit callback for unpack fields */
static void audit_cb(struct audit_buffer *ab, void *va)
{
	struct common_audit_data *sa = va;
	struct apparmor_audit_data *ad = aad(sa);

	if (ad->iface.ns) {
		audit_log_format(ab, " ns=");
		audit_log_untrustedstring(ab, ad->iface.ns);
	}
	if (ad->name) {
		audit_log_format(ab, " name=");
		audit_log_untrustedstring(ab, ad->name);
	}
	if (ad->iface.pos)
		audit_log_format(ab, " offset=%ld", ad->iface.pos);
}

/**
 * audit_iface - do audit message for policy unpacking/load/replace/remove
 * @new: profile if it has been allocated (MAYBE NULL)
 * @ns_name: name of the ns the profile is to be loaded to (MAYBE NULL)
 * @name: name of the profile being manipulated (MAYBE NULL)
 * @info: any extra info about the failure (MAYBE NULL)
 * @e: buffer position info
 * @error: error code
 *
 * Returns: %0 or error
 */
static int audit_iface(struct aa_profile *new, const char *ns_name,
		       const char *name, const char *info, struct aa_ext *e,
		       int error)
{
	struct aa_profile *profile = labels_profile(aa_current_raw_label());
	DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);

	if (e)
		ad.iface.pos = e->pos - e->start;
	ad.iface.ns = ns_name;
	if (new)
		ad.name = new->base.hname;
	else
		ad.name = name;
	ad.info = info;
	ad.error = error;

	return aa_audit(AUDIT_APPARMOR_STATUS, profile, &ad, audit_cb);
}

void __aa_loaddata_update(struct aa_loaddata *data, long revision)
{
	AA_BUG(!data);
	AA_BUG(!data->ns);
	AA_BUG(!mutex_is_locked(&data->ns->lock));
	AA_BUG(data->revision > revision);

	data->revision = revision;
	if ((data->dents[AAFS_LOADDATA_REVISION])) {
		struct inode *inode;

		inode = d_inode(data->dents[AAFS_LOADDATA_DIR]);
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));

		inode = d_inode(data->dents[AAFS_LOADDATA_REVISION]);
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	}
}

bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
{
	if (l->size != r->size)
		return false;
	if (l->compressed_size != r->compressed_size)
		return false;
	if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
		return false;
	return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
}

/*
 * We need to take the ns mutex lock, which is NOT safe in most places that
 * put_loaddata is called from, so we have to delay freeing it.
 */
static void do_loaddata_free(struct work_struct *work)
{
	struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
	struct aa_ns *ns = aa_get_ns(d->ns);

	if (ns) {
		mutex_lock_nested(&ns->lock, ns->level);
		__aa_fs_remove_rawdata(d);
		mutex_unlock(&ns->lock);
		aa_put_ns(ns);
	}

	kfree_sensitive(d->hash);
	kfree_sensitive(d->name);
	kvfree(d->data);
	kfree_sensitive(d);
}
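
/*
 * kref release callback for aa_loaddata; freeing is deferred to a
 * workqueue so that do_loaddata_free() can safely take the ns mutex
 * (see the comment above do_loaddata_free()).
 */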
void aa_loaddata_kref(struct kref *kref)
{
	struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);

	if (d) {
		INIT_WORK(&d->work, do_loaddata_free);
		schedule_work(&d->work);
	}
}

struct aa_loaddata *aa_loaddata_alloc(size_t size)
{
	struct aa_loaddata *d;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (d == NULL)
		return ERR_PTR(-ENOMEM);
	d->data = kvzalloc(size, GFP_KERNEL);
	if (!d->data) {
		kfree(d);
		return ERR_PTR(-ENOMEM);
	}
	kref_init(&d->count);
	INIT_LIST_HEAD(&d->list);

	return d;
}

/* test if read will be in packed data bounds */
VISIBLE_IF_KUNIT bool aa_inbounds(struct aa_ext *e, size_t size)
{
	return (size <= e->end - e->pos);
}
EXPORT_SYMBOL_IF_KUNIT(aa_inbounds);

/**
 * aa_unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
 * @e: serialized data read head (NOT NULL)
 * @chunk: start address for chunk of data (NOT NULL)
 *
 * Returns: the size of chunk found with the read head at the end of the chunk.
 */
VISIBLE_IF_KUNIT size_t aa_unpack_u16_chunk(struct aa_ext *e, char **chunk)
{
	size_t size = 0;
	void *pos = e->pos;

	if (!aa_inbounds(e, sizeof(u16)))
		goto fail;
	size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
	e->pos += sizeof(__le16);
	if (!aa_inbounds(e, size))
		goto fail;
	*chunk = e->pos;
	e->pos += size;
	return size;

fail:
	e->pos = pos;
	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u16_chunk);

/* unpack control byte */
VISIBLE_IF_KUNIT bool aa_unpack_X(struct aa_ext *e, enum aa_code code)
{
	if (!aa_inbounds(e, 1))
		return false;
	if (*(u8 *) e->pos != code)
		return false;
	e->pos++;
	return true;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_X);

/**
 * aa_unpack_nameX - check if the next element is of type X with a name of @name
 * @e: serialized data extent information (NOT NULL)
 * @code: type code
 * @name: name to match to the serialized element. (MAYBE NULL)
 *
 * check that the next serialized data element is of type X and has a tag
 * name @name. If @name is specified then there must be a matching
 * name element in the stream. If @name is NULL any name element will be
 * skipped and only the typecode will be tested.
 *
 * Returns true on success (both type code and name tests match) and the read
 * head is advanced past the headers
 *
 * Returns: false if either match fails, the read head does not move
 */
VISIBLE_IF_KUNIT bool aa_unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
{
	/*
	 * May need to reset pos if name or type doesn't match
	 */
	void *pos = e->pos;
	/*
	 * Check for presence of a tagname, and if present name size
	 * AA_NAME tag value is a u16.
	 */
	if (aa_unpack_X(e, AA_NAME)) {
		char *tag = NULL;
		size_t size = aa_unpack_u16_chunk(e, &tag);
		/* if a name is specified it must match. otherwise skip tag */
		if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
			goto fail;
	} else if (name) {
		/* if a name is specified and there is no name tag fail */
		goto fail;
	}

	/* now check if type code matches */
	if (aa_unpack_X(e, code))
		return true;

fail:
	e->pos = pos;
	return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_nameX);
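
/*
 * The typed helpers below all follow the same pattern: consume the optional
 * name tag and type code via aa_unpack_nameX(), bounds check the payload,
 * read it (little-endian for multi-byte values) and, on any failure, rewind
 * e->pos.  A sketch of the element layout they consume (sizes as used by
 * the code, not a formal format spec):
 *
 *   [AA_NAME][le16 len]["name\0"]  [AA_U32][le32 value]
 */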
static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_U8, name)) {
		if (!aa_inbounds(e, sizeof(u8)))
			goto fail;
		if (data)
			*data = *((u8 *)e->pos);
		e->pos += sizeof(u8);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}

VISIBLE_IF_KUNIT bool aa_unpack_u32(struct aa_ext *e, u32 *data, const char *name)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_U32, name)) {
		if (!aa_inbounds(e, sizeof(u32)))
			goto fail;
		if (data)
			*data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
		e->pos += sizeof(u32);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u32);

VISIBLE_IF_KUNIT bool aa_unpack_u64(struct aa_ext *e, u64 *data, const char *name)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_U64, name)) {
		if (!aa_inbounds(e, sizeof(u64)))
			goto fail;
		if (data)
			*data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
		e->pos += sizeof(u64);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u64);

static bool aa_unpack_cap_low(struct aa_ext *e, kernel_cap_t *data, const char *name)
{
	u32 val;

	if (!aa_unpack_u32(e, &val, name))
		return false;
	data->val = val;
	return true;
}

static bool aa_unpack_cap_high(struct aa_ext *e, kernel_cap_t *data, const char *name)
{
	u32 val;

	if (!aa_unpack_u32(e, &val, name))
		return false;
	data->val = (u32)data->val | ((u64)val << 32);
	return true;
}

VISIBLE_IF_KUNIT bool aa_unpack_array(struct aa_ext *e, const char *name, u16 *size)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_ARRAY, name)) {
		if (!aa_inbounds(e, sizeof(u16)))
			goto fail;
		*size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
		e->pos += sizeof(u16);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_array);

VISIBLE_IF_KUNIT size_t aa_unpack_blob(struct aa_ext *e, char **blob, const char *name)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_BLOB, name)) {
		u32 size;

		if (!aa_inbounds(e, sizeof(u32)))
			goto fail;
		size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
		e->pos += sizeof(u32);
		if (aa_inbounds(e, (size_t) size)) {
			*blob = e->pos;
			e->pos += size;
			return size;
		}
	}

fail:
	e->pos = pos;
	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_blob);

VISIBLE_IF_KUNIT int aa_unpack_str(struct aa_ext *e, const char **string, const char *name)
{
	char *src_str;
	size_t size = 0;
	void *pos = e->pos;

	*string = NULL;
	if (aa_unpack_nameX(e, AA_STRING, name)) {
		size = aa_unpack_u16_chunk(e, &src_str);
		if (size) {
			/* strings are null terminated, length is size - 1 */
			if (src_str[size - 1] != 0)
				goto fail;
			*string = src_str;

			return size;
		}
	}

fail:
	e->pos = pos;
	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_str);
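
/*
 * Like aa_unpack_str() but duplicates the string into a kmalloc'd buffer
 * owned by the caller; returns the string size, or 0 with the read head
 * rewound on unpack or allocation failure.
 */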
VISIBLE_IF_KUNIT int aa_unpack_strdup(struct aa_ext *e, char **string, const char *name)
{
	const char *tmp;
	void *pos = e->pos;
	int res = aa_unpack_str(e, &tmp, name);
	*string = NULL;

	if (!res)
		return 0;

	*string = kmemdup(tmp, res, GFP_KERNEL);
	if (!*string) {
		e->pos = pos;
		return 0;
	}

	return res;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_strdup);

/**
 * unpack_dfa - unpack a file rule dfa
 * @e: serialized data extent information (NOT NULL)
 * @flags: dfa flags to check
 *
 * Returns: the unpacked dfa, an ERR_PTR on failure, or NULL if no dfa is
 * present
 */
static struct aa_dfa *unpack_dfa(struct aa_ext *e, int flags)
{
	char *blob = NULL;
	size_t size;
	struct aa_dfa *dfa = NULL;

	size = aa_unpack_blob(e, &blob, "aadfa");
	if (size) {
		/*
		 * The dfa is aligned within the blob to 8 bytes
		 * from the beginning of the stream.
		 * alignment adjust needed by dfa unpack
		 */
		size_t sz = blob - (char *) e->start -
			((e->pos - e->start) & 7);
		size_t pad = ALIGN(sz, 8) - sz;

		if (aa_g_paranoid_load)
			flags |= DFA_FLAG_VERIFY_STATES;
		dfa = aa_dfa_unpack(blob + pad, size - pad, flags);

		if (IS_ERR(dfa))
			return dfa;
	}

	return dfa;
}
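
/*
 * The transition (x) table holds the target/label strings referenced by the
 * xindex, tag and label fields of permission entries; verify_perms() later
 * bounds checks those indexes against this table.
 */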
/**
 * unpack_trans_table - unpack a profile transition table
 * @e: serialized data extent information (NOT NULL)
 * @strs: str table to unpack to (NOT NULL)
 *
 * Returns: true if table successfully unpacked or not present
 */
static bool unpack_trans_table(struct aa_ext *e, struct aa_str_table *strs)
{
	void *saved_pos = e->pos;
	char **table = NULL;

	/* exec table is optional */
	if (aa_unpack_nameX(e, AA_STRUCT, "xtable")) {
		u16 size;
		int i;

		if (!aa_unpack_array(e, NULL, &size))
			/*
			 * Note: index into trans table array is a max
			 * of 2^24, but unpack array can only unpack
			 * an array of 2^16 in size at the moment so
			 * no need for a size check here
			 */
			goto fail;
		table = kcalloc(size, sizeof(char *), GFP_KERNEL);
		if (!table)
			goto fail;

		for (i = 0; i < size; i++) {
			char *str;
			int c, j, pos, size2 = aa_unpack_strdup(e, &str, NULL);
			/* aa_unpack_strdup verifies that the last character is
			 * a null termination byte.
			 */
			if (!size2)
				goto fail;
			table[i] = str;
			/* verify that name doesn't start with space */
			if (isspace(*str))
				goto fail;

			/* count the number of internal \0 */
			for (c = j = 0; j < size2 - 1; j++) {
				if (!str[j]) {
					pos = j;
					c++;
				}
			}
			if (*str == ':') {
				/* first character after : must be valid */
				if (!str[1])
					goto fail;
				/* beginning with : requires an embedded \0,
				 * verify that exactly 1 internal \0 exists;
				 * the trailing \0 was already verified by
				 * aa_unpack_strdup
				 *
				 * convert \0 back to : for label_parse
				 */
				if (c == 1)
					str[pos] = ':';
				else if (c > 1)
					goto fail;
			} else if (c)
				/* fail - all other cases with embedded \0 */
				goto fail;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;

		strs->table = table;
		strs->size = size;
	}
	return true;

fail:
	kfree_sensitive(table);
	e->pos = saved_pos;
	return false;
}

static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_STRUCT, "xattrs")) {
		u16 size;
		int i;

		if (!aa_unpack_array(e, NULL, &size))
			goto fail;
		profile->attach.xattr_count = size;
		profile->attach.xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
		if (!profile->attach.xattrs)
			goto fail;
		for (i = 0; i < size; i++) {
			if (!aa_unpack_strdup(e, &profile->attach.xattrs[i], NULL))
				goto fail;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	return true;

fail:
	e->pos = pos;
	return false;
}

static bool unpack_secmark(struct aa_ext *e, struct aa_ruleset *rules)
{
	void *pos = e->pos;
	u16 size;
	int i;

	if (aa_unpack_nameX(e, AA_STRUCT, "secmark")) {
		if (!aa_unpack_array(e, NULL, &size))
			goto fail;

		rules->secmark = kcalloc(size, sizeof(struct aa_secmark),
					 GFP_KERNEL);
		if (!rules->secmark)
			goto fail;

		rules->secmark_count = size;

		for (i = 0; i < size; i++) {
			if (!unpack_u8(e, &rules->secmark[i].audit, NULL))
				goto fail;
			if (!unpack_u8(e, &rules->secmark[i].deny, NULL))
				goto fail;
			if (!aa_unpack_strdup(e, &rules->secmark[i].label, NULL))
				goto fail;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	return true;

fail:
	if (rules->secmark) {
		for (i = 0; i < size; i++)
			kfree(rules->secmark[i].label);
		kfree(rules->secmark);
		rules->secmark_count = 0;
		rules->secmark = NULL;
	}

	e->pos = pos;
	return false;
}

static bool unpack_rlimits(struct aa_ext *e, struct aa_ruleset *rules)
{
	void *pos = e->pos;

	/* rlimits are optional */
	if (aa_unpack_nameX(e, AA_STRUCT, "rlimits")) {
		u16 size;
		int i;
		u32 tmp = 0;

		if (!aa_unpack_u32(e, &tmp, NULL))
			goto fail;
		rules->rlimits.mask = tmp;

		if (!aa_unpack_array(e, NULL, &size) ||
		    size > RLIM_NLIMITS)
			goto fail;
		for (i = 0; i < size; i++) {
			u64 tmp2 = 0;
			int a = aa_map_resource(i);

			if (!aa_unpack_u64(e, &tmp2, NULL))
				goto fail;
			rules->rlimits.limits[a].rlim_max = tmp2;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}
	return true;

fail:
	e->pos = pos;
	return false;
}
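
/*
 * Unpack a single version 1 permission entry.  The reads must follow the
 * order the fields were serialized in; note that the first two u32s in the
 * stream both land in ->allow (the second read overwrites the first).
 */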
static bool unpack_perm(struct aa_ext *e, u32 version, struct aa_perms *perm)
{
	if (version != 1)
		return false;

	return	aa_unpack_u32(e, &perm->allow, NULL) &&
		aa_unpack_u32(e, &perm->allow, NULL) &&
		aa_unpack_u32(e, &perm->deny, NULL) &&
		aa_unpack_u32(e, &perm->subtree, NULL) &&
		aa_unpack_u32(e, &perm->cond, NULL) &&
		aa_unpack_u32(e, &perm->kill, NULL) &&
		aa_unpack_u32(e, &perm->complain, NULL) &&
		aa_unpack_u32(e, &perm->prompt, NULL) &&
		aa_unpack_u32(e, &perm->audit, NULL) &&
		aa_unpack_u32(e, &perm->quiet, NULL) &&
		aa_unpack_u32(e, &perm->hide, NULL) &&
		aa_unpack_u32(e, &perm->xindex, NULL) &&
		aa_unpack_u32(e, &perm->tag, NULL) &&
		aa_unpack_u32(e, &perm->label, NULL);
}

static ssize_t unpack_perms_table(struct aa_ext *e, struct aa_perms **perms)
{
	void *pos = e->pos;
	u16 size = 0;

	AA_BUG(!perms);
	/*
	 * policy perms are optional, in which case perms are embedded
	 * in the dfa accept table
	 */
	if (aa_unpack_nameX(e, AA_STRUCT, "perms")) {
		int i;
		u32 version;

		if (!aa_unpack_u32(e, &version, "version"))
			goto fail_reset;
		if (!aa_unpack_array(e, NULL, &size))
			goto fail_reset;
		*perms = kcalloc(size, sizeof(struct aa_perms), GFP_KERNEL);
		if (!*perms)
			goto fail_reset;
		for (i = 0; i < size; i++) {
			if (!unpack_perm(e, version, &(*perms)[i]))
				goto fail;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	} else
		*perms = NULL;

	return size;

fail:
	kfree(*perms);
fail_reset:
	e->pos = pos;
	return -EPROTO;
}
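
/*
 * unpack_pdb - unpack a policy database: an optional permission table, the
 * dfa that indexes it, per-class start states and the transition table.
 * On success *policy is set and 0 is returned; on failure the read head is
 * rewound and a negative error is returned with *info describing the step
 * that failed.
 */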
static int unpack_pdb(struct aa_ext *e, struct aa_policydb **policy,
		      bool required_dfa, bool required_trans,
		      const char **info)
{
	struct aa_policydb *pdb;
	void *pos = e->pos;
	int i, flags, error = -EPROTO;
	ssize_t size;

	pdb = aa_alloc_pdb(GFP_KERNEL);
	if (!pdb)
		return -ENOMEM;

	size = unpack_perms_table(e, &pdb->perms);
	if (size < 0) {
		error = size;
		pdb->perms = NULL;
		*info = "failed to unpack - perms";
		goto fail;
	}
	pdb->size = size;

	if (pdb->perms) {
		/* perms table present, accept is an index into it */
		flags = TO_ACCEPT1_FLAG(YYTD_DATA32);
	} else {
		/* packed perms in accept1 and accept2 */
		flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
			TO_ACCEPT2_FLAG(YYTD_DATA32);
	}

	pdb->dfa = unpack_dfa(e, flags);
	if (IS_ERR(pdb->dfa)) {
		error = PTR_ERR(pdb->dfa);
		pdb->dfa = NULL;
		*info = "failed to unpack - dfa";
		goto fail;
	} else if (!pdb->dfa) {
		if (required_dfa) {
			*info = "missing required dfa";
			goto fail;
		}
		goto out;
	}

	/*
	 * only unpack the following if a dfa is present
	 *
	 * sadly start was given different names for file and policydb
	 * but since it is optional we can try both
	 */
	if (!aa_unpack_u32(e, &pdb->start[0], "start"))
		/* default start state */
		pdb->start[0] = DFA_START;
	if (!aa_unpack_u32(e, &pdb->start[AA_CLASS_FILE], "dfa_start")) {
		/* default start state for xmatch and file dfa */
		pdb->start[AA_CLASS_FILE] = DFA_START;
	}

	/* setup class index */
	for (i = AA_CLASS_FILE + 1; i <= AA_CLASS_LAST; i++) {
		pdb->start[i] = aa_dfa_next(pdb->dfa, pdb->start[0],
					    i);
	}

	if (!unpack_trans_table(e, &pdb->trans) && required_trans) {
		*info = "failed to unpack profile transition table";
		goto fail;
	}

	/* TODO: move compat mapping here, requires dfa merging first */
	/* TODO: move verify here, it has to be done after compat mappings */
out:
	*policy = pdb;
	return 0;

fail:
	aa_put_pdb(pdb);
	e->pos = pos;
	return error;
}
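
/*
 * Hash and compare callbacks for the rhashtable holding a profile's
 * key/value data (see the "data" struct unpacked in unpack_profile());
 * keys are NUL-terminated strings.
 */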
static u32 strhash(const void *data, u32 len, u32 seed)
{
	const char * const *key = data;

	return jhash(*key, strlen(*key), seed);
}

static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct aa_data *data = obj;
	const char * const *key = arg->key;

	return strcmp(data->key, *key);
}

/**
 * unpack_profile - unpack a serialized profile
 * @e: serialized data extent information (NOT NULL)
 * @ns_name: Returns - newly allocated copy of the ns name if the profile
 *           name specifies one, else %NULL (also set to %NULL on error)
 *
 * NOTE: unpack profile sets audit struct if there is a failure
 */
static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
{
	struct aa_ruleset *rules;
	struct aa_profile *profile = NULL;
	const char *tmpname, *tmpns = NULL, *name = NULL;
	const char *info = "failed to unpack profile";
	size_t ns_len;
	struct rhashtable_params params = { 0 };
	char *key = NULL, *disconnected = NULL;
	struct aa_data *data;
	int error = -EPROTO;
	kernel_cap_t tmpcap;
	u32 tmp;

	*ns_name = NULL;

	/* check that we have the right struct being passed */
	if (!aa_unpack_nameX(e, AA_STRUCT, "profile"))
		goto fail;
	if (!aa_unpack_str(e, &name, NULL))
		goto fail;
	if (*name == '\0')
		goto fail;

	tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
	if (tmpns) {
		*ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
		if (!*ns_name) {
			info = "out of memory";
			error = -ENOMEM;
			goto fail;
		}
		name = tmpname;
	}

	profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
	if (!profile) {
		info = "out of memory";
		error = -ENOMEM;
		goto fail;
	}
	rules = list_first_entry(&profile->rules, typeof(*rules), list);

	/* profile renaming is optional */
	(void) aa_unpack_str(e, &profile->rename, "rename");

	/* attachment string is optional */
	(void) aa_unpack_str(e, &profile->attach.xmatch_str, "attach");

	/* xmatch is optional and may be NULL */
	error = unpack_pdb(e, &profile->attach.xmatch, false, false, &info);
	if (error) {
		info = "bad xmatch";
		goto fail;
	}

	/* neither xmatch_len nor xmatch_perms is optional if xmatch is set */
	if (profile->attach.xmatch->dfa) {
		if (!aa_unpack_u32(e, &tmp, NULL)) {
			info = "missing xmatch len";
			goto fail;
		}
		profile->attach.xmatch_len = tmp;
		profile->attach.xmatch->start[AA_CLASS_XMATCH] = DFA_START;
		if (!profile->attach.xmatch->perms) {
			error = aa_compat_map_xmatch(profile->attach.xmatch);
			if (error) {
				info = "failed to convert xmatch permission table";
				goto fail;
			}
		}
	}

	/* disconnected attachment string is optional */
	(void) aa_unpack_strdup(e, &disconnected, "disconnected");
	profile->disconnected = disconnected;

	/* per profile debug flags (complain, audit) */
	if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) {
		info = "profile missing flags";
		goto fail;
	}
	info = "failed to unpack profile flags";
	if (!aa_unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp & PACKED_FLAG_HAT)
		profile->label.flags |= FLAG_HAT;
	if (tmp & PACKED_FLAG_DEBUG1)
		profile->label.flags |= FLAG_DEBUG1;
	if (tmp & PACKED_FLAG_DEBUG2)
		profile->label.flags |= FLAG_DEBUG2;
	if (!aa_unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) {
		profile->mode = APPARMOR_COMPLAIN;
	} else if (tmp == PACKED_MODE_ENFORCE) {
		profile->mode = APPARMOR_ENFORCE;
	} else if (tmp == PACKED_MODE_KILL) {
		profile->mode = APPARMOR_KILL;
	} else if (tmp == PACKED_MODE_UNCONFINED) {
		profile->mode = APPARMOR_UNCONFINED;
		profile->label.flags |= FLAG_UNCONFINED;
	} else if (tmp == PACKED_MODE_USER) {
		profile->mode = APPARMOR_USER;
	} else {
		goto fail;
	}
	if (!aa_unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp)
		profile->audit = AUDIT_ALL;

	if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
		goto fail;

	/* path_flags is optional */
	if (aa_unpack_u32(e, &profile->path_flags, "path_flags"))
		profile->path_flags |= profile->label.flags &
			PATH_MEDIATE_DELETED;
	else
		/* set a default value if path_flags field is not present */
		profile->path_flags = PATH_MEDIATE_DELETED;

	info = "failed to unpack profile capabilities";
	if (!aa_unpack_cap_low(e, &rules->caps.allow, NULL))
		goto fail;
	if (!aa_unpack_cap_low(e, &rules->caps.audit, NULL))
		goto fail;
	if (!aa_unpack_cap_low(e, &rules->caps.quiet, NULL))
		goto fail;
	if (!aa_unpack_cap_low(e, &tmpcap, NULL))
		goto fail;

	info = "failed to unpack upper profile capabilities";
	if (aa_unpack_nameX(e, AA_STRUCT, "caps64")) {
		/* optional upper half of 64 bit caps */
		if (!aa_unpack_cap_high(e, &rules->caps.allow, NULL))
			goto fail;
		if (!aa_unpack_cap_high(e, &rules->caps.audit, NULL))
			goto fail;
		if (!aa_unpack_cap_high(e, &rules->caps.quiet, NULL))
			goto fail;
		if (!aa_unpack_cap_high(e, &tmpcap, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	info = "failed to unpack extended profile capabilities";
	if (aa_unpack_nameX(e, AA_STRUCT, "capsx")) {
		/* optional extended caps mediation mask */
		if (!aa_unpack_cap_low(e, &rules->caps.extended, NULL))
			goto fail;
		if (!aa_unpack_cap_high(e, &rules->caps.extended, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	if (!unpack_xattrs(e, profile)) {
		info = "failed to unpack profile xattrs";
		goto fail;
	}

	if (!unpack_rlimits(e, rules)) {
		info = "failed to unpack profile rlimits";
		goto fail;
	}

	if (!unpack_secmark(e, rules)) {
		info = "failed to unpack profile secmark rules";
		goto fail;
	}

	if (aa_unpack_nameX(e, AA_STRUCT, "policydb")) {
		/* generic policy dfa - optional and may be NULL */
		info = "failed to unpack policydb";
		error = unpack_pdb(e, &rules->policy, true, false,
				   &info);
		if (error)
			goto fail;
		/* Fixup: drop when we get rid of start array */
		if (aa_dfa_next(rules->policy->dfa, rules->policy->start[0],
				AA_CLASS_FILE))
			rules->policy->start[AA_CLASS_FILE] =
				aa_dfa_next(rules->policy->dfa,
					    rules->policy->start[0],
					    AA_CLASS_FILE);
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
		if (!rules->policy->perms) {
			error = aa_compat_map_policy(rules->policy,
						     e->version);
			if (error) {
				info = "failed to remap policydb permission table";
				goto fail;
			}
		}
	} else {
		rules->policy = aa_get_pdb(nullpdb);
	}
	/* get file rules */
	error = unpack_pdb(e, &rules->file, false, true, &info);
	if (error) {
		goto fail;
	} else if (rules->file->dfa) {
		if (!rules->file->perms) {
			error = aa_compat_map_file(rules->file);
			if (error) {
				info = "failed to remap file permission table";
				goto fail;
			}
		}
	} else if (rules->policy->dfa &&
		   rules->policy->start[AA_CLASS_FILE]) {
		rules->file = aa_get_pdb(rules->policy);
	} else {
		rules->file = aa_get_pdb(nullpdb);
	}
	error = -EPROTO;
	if (aa_unpack_nameX(e, AA_STRUCT, "data")) {
		info = "out of memory";
		profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
		if (!profile->data) {
			error = -ENOMEM;
			goto fail;
		}
		params.nelem_hint = 3;
		params.key_len = sizeof(void *);
		params.key_offset = offsetof(struct aa_data, key);
		params.head_offset = offsetof(struct aa_data, head);
		params.hashfn = strhash;
		params.obj_cmpfn = datacmp;

		if (rhashtable_init(profile->data, &params)) {
			info = "failed to init key, value hash table";
			goto fail;
		}

		while (aa_unpack_strdup(e, &key, NULL)) {
			data = kzalloc(sizeof(*data), GFP_KERNEL);
			if (!data) {
				kfree_sensitive(key);
				error = -ENOMEM;
				goto fail;
			}

			data->key = key;
			data->size = aa_unpack_blob(e, &data->data, NULL);
			data->data = kvmemdup(data->data, data->size, GFP_KERNEL);
			if (data->size && !data->data) {
				kfree_sensitive(data->key);
				kfree_sensitive(data);
				error = -ENOMEM;
				goto fail;
			}

			if (rhashtable_insert_fast(profile->data, &data->head,
						   profile->data->p)) {
				kfree_sensitive(data->key);
				kfree_sensitive(data);
				info = "failed to insert data to table";
				goto fail;
			}
		}

		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
			info = "failed to unpack end of key, value data table";
			goto fail;
		}
	}

	if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
		info = "failed to unpack end of profile";
		goto fail;
	}

	return profile;

fail:
	if (error == 0)
		/* default error covers most cases */
		error = -EPROTO;
	if (*ns_name) {
		kfree(*ns_name);
		*ns_name = NULL;
	}
	if (profile)
		name = NULL;
	else if (!name)
		name = "unknown";
	audit_iface(profile, NULL, name, info, e, error);
	aa_free_profile(profile);

	return ERR_PTR(error);
}

/**
 * verify_header - unpack serialized stream header
 * @e: serialized data read head (NOT NULL)
 * @required: whether the header is required or optional
 * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
 *
 * Returns: error or 0 if header is good
 */
static int verify_header(struct aa_ext *e, int required, const char **ns)
{
	int error = -EPROTONOSUPPORT;
	const char *name = NULL;
	*ns = NULL;

	/* get the interface version */
	if (!aa_unpack_u32(e, &e->version, "version")) {
		if (required) {
			audit_iface(NULL, NULL, NULL, "invalid profile format",
				    e, error);
			return error;
		}
	}

	/* Check that the interface version is currently supported.
	 * If not specified, use the previous version.
	 * Mask off everything that is not the kernel abi version.
	 */
	if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v9)) {
		audit_iface(NULL, NULL, NULL, "unsupported interface version",
			    e, error);
		return error;
	}

	/* read the namespace if present */
	if (aa_unpack_str(e, &name, "namespace")) {
		if (*name == '\0') {
			audit_iface(NULL, NULL, NULL, "invalid namespace name",
				    e, error);
			return error;
		}
		if (*ns && strcmp(*ns, name)) {
			audit_iface(NULL, NULL, NULL, "invalid ns change", e,
				    error);
		} else if (!*ns) {
			*ns = kstrdup(name, GFP_KERNEL);
			if (!*ns)
				return -ENOMEM;
		}
	}

	return 0;
}

/**
 * verify_dfa_accept_index - verify accept indexes are in range of perms table
 * @dfa: the dfa to check accept indexes are in range
 * @table_size: the permission table size the indexes should be within
 */
static bool verify_dfa_accept_index(struct aa_dfa *dfa, int table_size)
{
	int i;

	for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
		if (ACCEPT_TABLE(dfa)[i] >= table_size)
			return false;
	}
	return true;
}
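
/*
 * Sanity check a single permission entry: the allow/deny sets must not
 * overlap, and the qualifier sets (subtree, cond, kill, complain, prompt,
 * hide) must be consistent with them.
 */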
static bool verify_perm(struct aa_perms *perm)
{
	/* TODO: allow option to just force the perms into a valid state */
	if (perm->allow & perm->deny)
		return false;
	if (perm->subtree & ~perm->allow)
		return false;
	if (perm->cond & (perm->allow | perm->deny))
		return false;
	if (perm->kill & perm->allow)
		return false;
	if (perm->complain & (perm->allow | perm->deny))
		return false;
	if (perm->prompt & (perm->allow | perm->deny))
		return false;
	if (perm->complain & perm->prompt)
		return false;
	if (perm->hide & perm->allow)
		return false;

	return true;
}

static bool verify_perms(struct aa_policydb *pdb)
{
	int i;

	for (i = 0; i < pdb->size; i++) {
		if (!verify_perm(&pdb->perms[i]))
			return false;
		/* verify indexes into str table */
		if ((pdb->perms[i].xindex & AA_X_TYPE_MASK) == AA_X_TABLE &&
		    (pdb->perms[i].xindex & AA_X_INDEX_MASK) >= pdb->trans.size)
			return false;
		if (pdb->perms[i].tag && pdb->perms[i].tag >= pdb->trans.size)
			return false;
		if (pdb->perms[i].label &&
		    pdb->perms[i].label >= pdb->trans.size)
			return false;
	}

	return true;
}

/**
 * verify_profile - Do post unpack analysis to verify profile consistency
 * @profile: profile to verify (NOT NULL)
 *
 * Returns: 0 if passes verification else error
 *
 * This verification is post any unpack mapping or changes
 */
static int verify_profile(struct aa_profile *profile)
{
	struct aa_ruleset *rules = list_first_entry(&profile->rules,
						    typeof(*rules), list);
	if (!rules)
		return 0;

	if (rules->file->dfa && !verify_dfa_accept_index(rules->file->dfa,
							 rules->file->size)) {
		audit_iface(profile, NULL, NULL,
			    "Unpack: file Invalid named transition", NULL,
			    -EPROTO);
		return -EPROTO;
	}
	if (rules->policy->dfa &&
	    !verify_dfa_accept_index(rules->policy->dfa, rules->policy->size)) {
		audit_iface(profile, NULL, NULL,
			    "Unpack: policy Invalid named transition", NULL,
			    -EPROTO);
		return -EPROTO;
	}

	if (!verify_perms(rules->file)) {
		audit_iface(profile, NULL, NULL,
			    "Unpack: Invalid perm index", NULL, -EPROTO);
		return -EPROTO;
	}
	if (!verify_perms(rules->policy)) {
		audit_iface(profile, NULL, NULL,
			    "Unpack: Invalid perm index", NULL, -EPROTO);
		return -EPROTO;
	}
	if (!verify_perms(profile->attach.xmatch)) {
		audit_iface(profile, NULL, NULL,
			    "Unpack: Invalid perm index", NULL, -EPROTO);
		return -EPROTO;
	}

	return 0;
}

void aa_load_ent_free(struct aa_load_ent *ent)
{
	if (ent) {
		aa_put_profile(ent->rename);
		aa_put_profile(ent->old);
		aa_put_profile(ent->new);
		kfree(ent->ns_name);
		kfree_sensitive(ent);
	}
}

struct aa_load_ent *aa_load_ent_alloc(void)
{
	struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);

	if (ent)
		INIT_LIST_HEAD(&ent->list);
	return ent;
}
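
/*
 * Compress @slen bytes of raw policy data from @src into a freshly
 * allocated buffer returned via @dst, with the compressed length in @dlen.
 * Compression that does not shrink the data is treated as an error.  When
 * AppArmor is built without CONFIG_SECURITY_APPARMOR_EXPORT_BINARY this is
 * a no-op that reports @slen back through @dlen.
 */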
static int compress_zstd(const char *src, size_t slen, char **dst, size_t *dlen)
{
#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
	const zstd_parameters params =
		zstd_get_params(aa_g_rawdata_compression_level, slen);
	const size_t wksp_len = zstd_cctx_workspace_bound(&params.cParams);
	void *wksp = NULL;
	zstd_cctx *ctx = NULL;
	size_t out_len = zstd_compress_bound(slen);
	void *out = NULL;
	int ret = 0;

	out = kvzalloc(out_len, GFP_KERNEL);
	if (!out) {
		ret = -ENOMEM;
		goto cleanup;
	}

	wksp = kvzalloc(wksp_len, GFP_KERNEL);
	if (!wksp) {
		ret = -ENOMEM;
		goto cleanup;
	}

	ctx = zstd_init_cctx(wksp, wksp_len);
	if (!ctx) {
		ret = -EINVAL;
		goto cleanup;
	}

	out_len = zstd_compress_cctx(ctx, out, out_len, src, slen, &params);
	if (zstd_is_error(out_len) || out_len >= slen) {
		ret = -EINVAL;
		goto cleanup;
	}

	if (is_vmalloc_addr(out)) {
		*dst = kvzalloc(out_len, GFP_KERNEL);
		if (*dst) {
			memcpy(*dst, out, out_len);
			kvfree(out);
			out = NULL;
		}
	} else {
		/*
		 * If the staging buffer was kmalloc'd, then using krealloc is
		 * probably going to be faster. The destination buffer will
		 * always be smaller, so it's just shrunk, avoiding a memcpy
		 */
		*dst = krealloc(out, out_len, GFP_KERNEL);
	}

	if (!*dst) {
		ret = -ENOMEM;
		goto cleanup;
	}

	*dlen = out_len;

cleanup:
	if (ret) {
		kvfree(out);
		*dst = NULL;
	}

	kvfree(wksp);
	return ret;
#else
	*dlen = slen;
	return 0;
#endif
}

static int compress_loaddata(struct aa_loaddata *data)
{
	AA_BUG(data->compressed_size > 0);

	/*
	 * Shortcut the no compression case, else we increase the amount of
	 * storage required by a small amount
	 */
	if (aa_g_rawdata_compression_level != 0) {
		void *udata = data->data;
		int error = compress_zstd(udata, data->size, &data->data,
					  &data->compressed_size);
		if (error) {
			data->compressed_size = data->size;
			return error;
		}
		if (udata != data->data)
			kvfree(udata);
	} else
		data->compressed_size = data->size;

	return 0;
}

/**
 * aa_unpack - unpack packed binary profile(s) data loaded from user space
 * @udata: user data copied to kmem (NOT NULL)
 * @lh: list to place unpacked profiles in an aa_repl_ws
 * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
 *
 * Unpack user data and return refcounted allocated profile(s) stored in
 * @lh in order of discovery, with the list chain stored in base.list
 * or error
 *
 * Returns: profile(s) on @lh else error pointer if fails to unpack
 */
int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
	      const char **ns)
{
	struct aa_load_ent *tmp, *ent;
	struct aa_profile *profile = NULL;
	char *ns_name = NULL;
	int error;
	struct aa_ext e = {
		.start = udata->data,
		.end = udata->data + udata->size,
		.pos = udata->data,
	};

	*ns = NULL;
	while (e.pos < e.end) {
		void *start;

		error = verify_header(&e, e.pos == e.start, ns);
		if (error)
			goto fail;

		start = e.pos;
		profile = unpack_profile(&e, &ns_name);
		if (IS_ERR(profile)) {
			error = PTR_ERR(profile);
			goto fail;
		}

		error = verify_profile(profile);
		if (error)
			goto fail_profile;

		if (aa_g_hash_policy)
			error = aa_calc_profile_hash(profile, e.version, start,
						     e.pos - start);
		if (error)
			goto fail_profile;

		ent = aa_load_ent_alloc();
		if (!ent) {
			error = -ENOMEM;
			goto fail_profile;
		}

		ent->new = profile;
		ent->ns_name = ns_name;
		ns_name = NULL;
		list_add_tail(&ent->list, lh);
	}
	udata->abi = e.version & K_ABI_MASK;
	if (aa_g_hash_policy) {
		udata->hash = aa_calc_hash(udata->data, udata->size);
		if (IS_ERR(udata->hash)) {
			error = PTR_ERR(udata->hash);
			udata->hash = NULL;
			goto fail;
		}
	}

	if (aa_g_export_binary) {
		error = compress_loaddata(udata);
		if (error)
			goto fail;
	}
	return 0;

fail_profile:
	kfree(ns_name);
	aa_put_profile(profile);

fail:
	list_for_each_entry_safe(ent, tmp, lh, list) {
		list_del_init(&ent->list);
		aa_load_ent_free(ent);
	}

	return error;
}