// SPDX-License-Identifier: GPL-2.0-only
/*
 * AppArmor security module
 *
 * This file contains AppArmor functions for unpacking policy loaded from
 * userspace.
 *
 * Copyright (C) 1998-2008 Novell/SUSE
 * Copyright 2009-2010 Canonical Ltd.
 *
 * AppArmor uses a serialized binary format for loading policy. To find
 * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
 * All policy is validated before it is used.
 */

#include <linux/unaligned.h>
#include <kunit/visibility.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/zstd.h>

#include "include/apparmor.h"
#include "include/audit.h"
#include "include/cred.h"
#include "include/crypto.h"
#include "include/file.h"
#include "include/match.h"
#include "include/path.h"
#include "include/policy.h"
#include "include/policy_unpack.h"
#include "include/policy_compat.h"
#include "include/signal.h"

/* audit callback for unpack fields */
static void audit_cb(struct audit_buffer *ab, void *va)
{
	struct common_audit_data *sa = va;
	struct apparmor_audit_data *ad = aad(sa);

	if (ad->iface.ns) {
		audit_log_format(ab, " ns=");
		audit_log_untrustedstring(ab, ad->iface.ns);
	}
	if (ad->name) {
		audit_log_format(ab, " name=");
		audit_log_untrustedstring(ab, ad->name);
	}
	if (ad->iface.pos)
		audit_log_format(ab, " offset=%ld", ad->iface.pos);
}

/**
 * audit_iface - do audit message for policy unpacking/load/replace/remove
 * @new: profile if it has been allocated (MAYBE NULL)
 * @ns_name: name of the ns the profile is to be loaded to (MAY BE NULL)
 * @name: name of the profile being manipulated (MAYBE NULL)
 * @info: any extra info about the failure (MAYBE NULL)
 * @e: buffer position info
 * @error: error code
 *
 * Returns: %0 or error
 */
static int audit_iface(struct aa_profile *new, const char *ns_name,
		       const char *name, const char *info, struct aa_ext *e,
		       int error)
{
	struct aa_profile *profile = labels_profile(aa_current_raw_label());
	DEFINE_AUDIT_DATA(ad, LSM_AUDIT_DATA_NONE, AA_CLASS_NONE, NULL);

	if (e)
		ad.iface.pos = e->pos - e->start;
	ad.iface.ns = ns_name;
	if (new)
		ad.name = new->base.hname;
	else
		ad.name = name;
	ad.info = info;
	ad.error = error;

	return aa_audit(AUDIT_APPARMOR_STATUS, profile, &ad, audit_cb);
}

void __aa_loaddata_update(struct aa_loaddata *data, long revision)
{
	AA_BUG(!data);
	AA_BUG(!data->ns);
	AA_BUG(!mutex_is_locked(&data->ns->lock));
	AA_BUG(data->revision > revision);

	data->revision = revision;
	if ((data->dents[AAFS_LOADDATA_REVISION])) {
		struct inode *inode;

		inode = d_inode(data->dents[AAFS_LOADDATA_DIR]);
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));

		inode = d_inode(data->dents[AAFS_LOADDATA_REVISION]);
		inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	}
}

bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
{
	if (l->size != r->size)
		return false;
	if (l->compressed_size != r->compressed_size)
		return false;
	if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
		return false;
	return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
}
/*
 * need to take the ns mutex lock which is NOT safe most places that
 * put_loaddata is called, so we have to delay freeing it
 */
static void do_loaddata_free(struct work_struct *work)
{
	struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
	struct aa_ns *ns = aa_get_ns(d->ns);

	if (ns) {
		mutex_lock_nested(&ns->lock, ns->level);
		__aa_fs_remove_rawdata(d);
		mutex_unlock(&ns->lock);
		aa_put_ns(ns);
	}

	kfree_sensitive(d->hash);
	kfree_sensitive(d->name);
	kvfree(d->data);
	kfree_sensitive(d);
}

void aa_loaddata_kref(struct kref *kref)
{
	struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);

	if (d) {
		INIT_WORK(&d->work, do_loaddata_free);
		schedule_work(&d->work);
	}
}

struct aa_loaddata *aa_loaddata_alloc(size_t size)
{
	struct aa_loaddata *d;

	d = kzalloc_obj(*d, GFP_KERNEL);
	if (d == NULL)
		return ERR_PTR(-ENOMEM);
	d->data = kvzalloc(size, GFP_KERNEL);
	if (!d->data) {
		kfree(d);
		return ERR_PTR(-ENOMEM);
	}
	kref_init(&d->count);
	INIT_LIST_HEAD(&d->list);

	return d;
}

/* test if read will be in packed data bounds */
VISIBLE_IF_KUNIT bool aa_inbounds(struct aa_ext *e, size_t size)
{
	return (size <= e->end - e->pos);
}
EXPORT_SYMBOL_IF_KUNIT(aa_inbounds);

/**
 * aa_unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
 * @e: serialized data read head (NOT NULL)
 * @chunk: start address for chunk of data (NOT NULL)
 *
 * Returns: the size of chunk found with the read head at the end of the chunk.
 */
VISIBLE_IF_KUNIT size_t aa_unpack_u16_chunk(struct aa_ext *e, char **chunk)
{
	size_t size = 0;
	void *pos = e->pos;

	if (!aa_inbounds(e, sizeof(u16)))
		goto fail;
	size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
	e->pos += sizeof(__le16);
	if (!aa_inbounds(e, size))
		goto fail;
	*chunk = e->pos;
	e->pos += size;
	return size;

fail:
	e->pos = pos;
	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u16_chunk);

/* unpack control byte */
VISIBLE_IF_KUNIT bool aa_unpack_X(struct aa_ext *e, enum aa_code code)
{
	if (!aa_inbounds(e, 1))
		return false;
	if (*(u8 *) e->pos != code)
		return false;
	e->pos++;
	return true;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_X);
/**
 * aa_unpack_nameX - check if the next element is of type X with a name of @name
 * @e: serialized data extent information (NOT NULL)
 * @code: type code
 * @name: name to match to the serialized element. (MAYBE NULL)
 *
 * check that the next serialized data element is of type X and has a tag
 * name @name. If @name is specified then there must be a matching
 * name element in the stream. If @name is NULL any name element will be
 * skipped and only the typecode will be tested.
 *
 * Returns true on success (both type code and name tests match) and the read
 * head is advanced past the headers
 *
 * Returns: false if either match fails, the read head does not move
 */
VISIBLE_IF_KUNIT bool aa_unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
{
	/*
	 * May need to reset pos if name or type doesn't match
	 */
	void *pos = e->pos;
	/*
	 * Check for presence of a tagname, and if present name size
	 * AA_NAME tag value is a u16.
	 */
	if (aa_unpack_X(e, AA_NAME)) {
		char *tag = NULL;
		size_t size = aa_unpack_u16_chunk(e, &tag);
		/* if a name is specified it must match. otherwise skip tag */
		if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
			goto fail;
	} else if (name) {
		/* if a name is specified and there is no name tag fail */
		goto fail;
	}

	/* now check if type code matches */
	if (aa_unpack_X(e, code))
		return true;

fail:
	e->pos = pos;
	return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_nameX);

static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_U8, name)) {
		if (!aa_inbounds(e, sizeof(u8)))
			goto fail;
		if (data)
			*data = *((u8 *)e->pos);
		e->pos += sizeof(u8);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}

VISIBLE_IF_KUNIT bool aa_unpack_u32(struct aa_ext *e, u32 *data, const char *name)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_U32, name)) {
		if (!aa_inbounds(e, sizeof(u32)))
			goto fail;
		if (data)
			*data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
		e->pos += sizeof(u32);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u32);

VISIBLE_IF_KUNIT bool aa_unpack_u64(struct aa_ext *e, u64 *data, const char *name)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_U64, name)) {
		if (!aa_inbounds(e, sizeof(u64)))
			goto fail;
		if (data)
			*data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
		e->pos += sizeof(u64);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u64);

static bool aa_unpack_cap_low(struct aa_ext *e, kernel_cap_t *data, const char *name)
{
	u32 val;

	if (!aa_unpack_u32(e, &val, name))
		return false;
	data->val = val;
	return true;
}

static bool aa_unpack_cap_high(struct aa_ext *e, kernel_cap_t *data, const char *name)
{
	u32 val;

	if (!aa_unpack_u32(e, &val, name))
		return false;
	data->val = (u32)data->val | ((u64)val << 32);
	return true;
}

VISIBLE_IF_KUNIT bool aa_unpack_array(struct aa_ext *e, const char *name, u16 *size)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_ARRAY, name)) {
		if (!aa_inbounds(e, sizeof(u16)))
			goto fail;
		*size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
		e->pos += sizeof(u16);
		return true;
	}

fail:
	e->pos = pos;
	return false;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_array);

VISIBLE_IF_KUNIT size_t aa_unpack_blob(struct aa_ext *e, char **blob, const char *name)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_BLOB, name)) {
		u32 size;
		if (!aa_inbounds(e, sizeof(u32)))
			goto fail;
		size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
		e->pos += sizeof(u32);
		if (aa_inbounds(e, (size_t) size)) {
			*blob = e->pos;
			e->pos += size;
			return size;
		}
	}

fail:
	e->pos = pos;
	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_blob);

VISIBLE_IF_KUNIT int aa_unpack_str(struct aa_ext *e, const char **string, const char *name)
{
	char *src_str;
	size_t size = 0;
	void *pos = e->pos;
	*string = NULL;
	if (aa_unpack_nameX(e, AA_STRING, name)) {
		size = aa_unpack_u16_chunk(e, &src_str);
		if (size) {
			/* strings are null terminated, length is size - 1 */
			if (src_str[size - 1] != 0)
				goto fail;
			*string = src_str;

			return size;
		}
	}

fail:
	e->pos = pos;
	return 0;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_str);
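/**
 * aa_unpack_strdup - unpack a string and make a private copy of it
 * @e: serialized data extent information (NOT NULL)
 * @string: Returns - kmemdup'd copy of the unpacked string, NULL on failure
 * @name: name to match to the serialized element. (MAYBE NULL)
 *
 * Returns: the size of the string including the trailing NUL, or 0 on
 *          failure with the read head reset
 */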
VISIBLE_IF_KUNIT int aa_unpack_strdup(struct aa_ext *e, char **string, const char *name)
{
	const char *tmp;
	void *pos = e->pos;
	int res = aa_unpack_str(e, &tmp, name);
	*string = NULL;

	if (!res)
		return 0;

	*string = kmemdup(tmp, res, GFP_KERNEL);
	if (!*string) {
		e->pos = pos;
		return 0;
	}

	return res;
}
EXPORT_SYMBOL_IF_KUNIT(aa_unpack_strdup);

/**
 * unpack_dfa - unpack a file rule dfa
 * @e: serialized data extent information (NOT NULL)
 * @flags: dfa flags to check
 *
 * Returns: dfa or ERR_PTR or NULL if no dfa
 */
static struct aa_dfa *unpack_dfa(struct aa_ext *e, int flags)
{
	char *blob = NULL;
	size_t size;
	struct aa_dfa *dfa = NULL;

	size = aa_unpack_blob(e, &blob, "aadfa");
	if (size) {
		/*
		 * The dfa is aligned within the blob to 8 bytes
		 * from the beginning of the stream.
		 * alignment adjust needed by dfa unpack
		 */
		size_t sz = blob - (char *) e->start -
			((e->pos - e->start) & 7);
		size_t pad = ALIGN(sz, 8) - sz;

		if (aa_g_paranoid_load)
			flags |= DFA_FLAG_VERIFY_STATES;
		dfa = aa_dfa_unpack(blob + pad, size - pad, flags);

		if (IS_ERR(dfa))
			return dfa;

	}

	return dfa;
}
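/*
 * process_strs_entry - validate a (possibly multi valued) string table entry
 * @str: entry to check
 * @size: size of @str including the trailing NUL(s)
 * @multi: true if the entry may contain multiple NUL separated strings
 *
 * Returns: the number of strings found in the entry, or a negative value
 *          identifying which validation check failed
 */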
static int process_strs_entry(char *str, int size, bool multi)
{
	int c = 1;

	if (size <= 0)
		return -1;
	if (multi) {
		if (size < 2)
			return -2;
		/* multi ends with double \0 */
		if (str[size - 2])
			return -3;
	}

	char *save = str;
	char *pos = str;
	char *end = multi ? str + size - 2 : str + size - 1;
	/* count # of internal \0 */
	while (str < end) {
		if (str == pos) {
			/* starts with ... */
			if (!*str) {
				AA_DEBUG(DEBUG_UNPACK,
					 "starting with null save=%lu size %d c=%d",
					 (unsigned long)(str - save), size, c);
				return -4;
			}
			if (isspace(*str))
				return -5;
			if (*str == ':') {
				/* :ns_str\0str\0
				 * first character after : must be valid
				 */
				if (!str[1])
					return -6;
			}
		} else if (!*str) {
			if (*pos == ':')
				*str = ':';
			else
				c++;
			pos = str + 1;
		}
		str++;
	} /* while */

	return c;
}

/**
 * unpack_strs_table - unpack a profile transition table
 * @e: serialized data extent information (NOT NULL)
 * @name: name of table (MAY BE NULL)
 * @multi: allow multiple strings on a single entry
 * @strs: str table to unpack to (NOT NULL)
 *
 * Returns: 0 if table successfully unpacked or not present, else error
 */
static int unpack_strs_table(struct aa_ext *e, const char *name, bool multi,
			     struct aa_str_table *strs)
{
	void *saved_pos = e->pos;
	struct aa_str_table_ent *table = NULL;
	int error = -EPROTO;

	/* exec table is optional */
	if (aa_unpack_nameX(e, AA_STRUCT, name)) {
		u16 size;
		int i;

		if (!aa_unpack_array(e, NULL, &size))
			/*
			 * Note: index into trans table array is a max
			 * of 2^24, but unpack array can only unpack
			 * an array of 2^16 in size atm so no need
			 * for size check here
			 */
			goto fail;
		table = kzalloc_objs(struct aa_str_table_ent, size, GFP_KERNEL);
		if (!table) {
			error = -ENOMEM;
			goto fail;
		}
		strs->table = table;
		strs->size = size;
		for (i = 0; i < size; i++) {
			char *str;
			int c, size2 = aa_unpack_strdup(e, &str, NULL);
			/* aa_unpack_strdup verifies that the last character
			 * is a null termination byte.
			 */
			c = process_strs_entry(str, size2, multi);
			if (c <= 0) {
				AA_DEBUG(DEBUG_UNPACK, "process_strs %d i %d pos %ld",
					 c, i,
					 (unsigned long)(e->pos - saved_pos));
				goto fail;
			}
			if (!multi && c > 1) {
				AA_DEBUG(DEBUG_UNPACK, "!multi && c > 1");
				/* fail - all other cases with embedded \0 */
				goto fail;
			}
			table[i].strs = str;
			table[i].count = c;
			table[i].size = size2;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}
	return 0;

fail:
	aa_destroy_str_table(strs);
	e->pos = saved_pos;
	return error;
}

static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
{
	void *pos = e->pos;

	if (aa_unpack_nameX(e, AA_STRUCT, "xattrs")) {
		u16 size;
		int i;

		if (!aa_unpack_array(e, NULL, &size))
			goto fail;
		profile->attach.xattr_count = size;
		profile->attach.xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
		if (!profile->attach.xattrs)
			goto fail;
		for (i = 0; i < size; i++) {
			if (!aa_unpack_strdup(e, &profile->attach.xattrs[i], NULL))
				goto fail;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	return true;

fail:
	e->pos = pos;
	return false;
}

static bool unpack_secmark(struct aa_ext *e, struct aa_ruleset *rules)
{
	void *pos = e->pos;
	u16 size;
	int i;

	if (aa_unpack_nameX(e, AA_STRUCT, "secmark")) {
		if (!aa_unpack_array(e, NULL, &size))
			goto fail;

		rules->secmark = kzalloc_objs(struct aa_secmark, size,
					      GFP_KERNEL);
		if (!rules->secmark)
			goto fail;

		rules->secmark_count = size;

		for (i = 0; i < size; i++) {
			if (!unpack_u8(e, &rules->secmark[i].audit, NULL))
				goto fail;
			if (!unpack_u8(e, &rules->secmark[i].deny, NULL))
				goto fail;
			if (!aa_unpack_strdup(e, &rules->secmark[i].label, NULL))
				goto fail;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	return true;

fail:
	if (rules->secmark) {
		for (i = 0; i < size; i++)
			kfree_sensitive(rules->secmark[i].label);
		kfree_sensitive(rules->secmark);
		rules->secmark_count = 0;
		rules->secmark = NULL;
	}

	e->pos = pos;
	return false;
}

static bool unpack_rlimits(struct aa_ext *e, struct aa_ruleset *rules)
{
	void *pos = e->pos;

	/* rlimits are optional */
	if (aa_unpack_nameX(e, AA_STRUCT, "rlimits")) {
		u16 size;
		int i;
		u32 tmp = 0;

		if (!aa_unpack_u32(e, &tmp, NULL))
			goto fail;
		rules->rlimits.mask = tmp;

		if (!aa_unpack_array(e, NULL, &size) ||
		    size > RLIM_NLIMITS)
			goto fail;
		for (i = 0; i < size; i++) {
			u64 tmp2 = 0;
			int a = aa_map_resource(i);

			if (!aa_unpack_u64(e, &tmp2, NULL))
				goto fail;
			rules->rlimits.limits[a].rlim_max = tmp2;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}
	return true;

fail:
	e->pos = pos;
	return false;
}


/* verify_tags - cross check the unpacked tag sets, headers and strings */
static bool verify_tags(struct aa_tags_struct *tags, const char **info)
{
	if ((tags->hdrs.size && !tags->hdrs.table) ||
	    (!tags->hdrs.size && tags->hdrs.table)) {
		*info = "failed verification tag.hdrs disagree";
		return false;
	}
	if ((tags->sets.size && !tags->sets.table) ||
	    (!tags->sets.size && tags->sets.table)) {
		*info = "failed verification tag.sets disagree";
		return false;
	}
	if ((tags->strs.size && !tags->strs.table) ||
	    (!tags->strs.size && tags->strs.table)) {
		*info = "failed verification tags->strs disagree";
		return false;
	}
	/* no data present */
	if (!tags->sets.size && !tags->hdrs.size && !tags->strs.size) {
		return true;
	} else if (!(tags->sets.size && tags->hdrs.size && tags->strs.size)) {
		/* some data present but not all */
		*info = "failed verification tags partial data present";
		return false;
	}

	u32 i;

	for (i = 0; i < tags->sets.size; i++) {
		/* count followed by count indexes into hdrs */
		u32 cnt = tags->sets.table[i];

		if (i+cnt >= tags->sets.size) {
			AA_DEBUG(DEBUG_UNPACK,
				 "tagset too large %d+%d > sets.table[%d]",
				 i, cnt, tags->sets.size);
			*info = "failed verification tagset too large";
			return false;
		}
		for (; cnt; cnt--) {
			if (tags->sets.table[++i] >= tags->hdrs.size) {
				AA_DEBUG(DEBUG_UNPACK,
					 "tagsets idx out of bounds cnt %d sets.table[%d] >= %d",
					 cnt, i-1, tags->hdrs.size);
				*info = "failed verification tagsets idx out of bounds";
				return false;
			}
		}
	}
	for (i = 0; i < tags->hdrs.size; i++) {
		u32 idx = tags->hdrs.table[i].tags;

		if (idx >= tags->strs.size) {
			AA_DEBUG(DEBUG_UNPACK,
				 "tag.hdrs idx oob idx %d > tags->strs.size=%d",
				 idx, tags->strs.size);
			*info = "failed verification tags.hdrs idx out of bounds";
			return false;
		}
		if (tags->hdrs.table[i].count != tags->strs.table[idx].count) {
			AA_DEBUG(DEBUG_UNPACK, "hdrs.table[%d].count=%d != tags->strs.table[%d]=%d",
				 i, tags->hdrs.table[i].count, idx, tags->strs.table[idx].count);
			*info = "failed verification tagd.hdrs[idx].count";
			return false;
		}
		if (tags->hdrs.table[i].size != tags->strs.table[idx].size) {
			AA_DEBUG(DEBUG_UNPACK, "hdrs.table[%d].size=%d != strs.table[%d].size=%d",
				 i, tags->hdrs.table[i].size, idx, tags->strs.table[idx].size);
			*info = "failed verification tagd.hdrs[idx].size";
			return false;
		}
	}

	return true;
}

static int unpack_tagsets(struct aa_ext *e, struct aa_tags_struct *tags)
{
	u32 *sets;
	u16 i, size;
	int error = -EPROTO;
	void *pos = e->pos;

	if (!aa_unpack_array(e, "sets", &size))
		goto fail_reset;
	sets = kcalloc(size, sizeof(u32), GFP_KERNEL);
	if (!sets) {
		error = -ENOMEM;
		goto fail_reset;
	}
	for (i = 0; i < size; i++) {
		if (!aa_unpack_u32(e, &sets[i], NULL))
			goto fail;
	}
	if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
		goto fail;

	tags->sets.size = size;
	tags->sets.table = sets;

	return 0;

fail:
	kfree_sensitive(sets);
fail_reset:
	e->pos = pos;
	return error;
}

static bool unpack_tag_header_ent(struct aa_ext *e, struct aa_tags_header *h)
{
	return aa_unpack_u32(e, &h->mask, NULL) &&
	       aa_unpack_u32(e, &h->count, NULL) &&
	       aa_unpack_u32(e, &h->size, NULL) &&
	       aa_unpack_u32(e, &h->tags, NULL);
}
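/**
 * unpack_tag_headers - unpack the "hdrs" array of a tags struct
 * @e: serialized data extent information (NOT NULL)
 * @tags: tags struct to store the unpacked headers in (NOT NULL)
 *
 * Returns: 0 on success, else error with the read head reset
 */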
static int unpack_tag_headers(struct aa_ext *e, struct aa_tags_struct *tags)
{
	struct aa_tags_header *hdrs;
	u16 i, size;
	int error = -EPROTO;
	void *pos = e->pos;

	if (!aa_unpack_array(e, "hdrs", &size))
		goto fail_reset;
	hdrs = kzalloc_objs(struct aa_tags_header, size, GFP_KERNEL);
	if (!hdrs) {
		error = -ENOMEM;
		goto fail_reset;
	}
	for (i = 0; i < size; i++) {
		if (!unpack_tag_header_ent(e, &hdrs[i]))
			goto fail;
	}
	if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
		goto fail;

	tags->hdrs.size = size;
	tags->hdrs.table = hdrs;
	AA_DEBUG(DEBUG_UNPACK, "headers %ld size %d", (long) hdrs, size);
	return 0;

fail:
	kfree_sensitive(hdrs);
fail_reset:
	e->pos = pos;
	return error;
}


static int unpack_tags(struct aa_ext *e, struct aa_tags_struct *tags,
		       const char **info)
{
	int error = -EPROTO;
	void *pos = e->pos;

	AA_BUG(!tags);
	/* policy tags are optional */
	if (aa_unpack_nameX(e, AA_STRUCT, "tags")) {
		u32 version;

		if (!aa_unpack_u32(e, &version, "version") || version != 1) {
			*info = "invalid tags version";
			goto fail_reset;
		}
		error = unpack_strs_table(e, "strs", true, &tags->strs);
		if (error) {
			*info = "failed to unpack profile tag.strs";
			goto fail;
		}
		error = unpack_tag_headers(e, tags);
		if (error) {
			*info = "failed to unpack profile tag.headers";
			goto fail;
		}
		error = unpack_tagsets(e, tags);
		if (error) {
			*info = "failed to unpack profile tag.sets";
			goto fail;
		}
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;

		if (!verify_tags(tags, info))
			goto fail;
	}

	return 0;

fail:
	aa_destroy_tags(tags);
fail_reset:
	e->pos = pos;
	return error;
}

static bool unpack_perm(struct aa_ext *e, u32 version, struct aa_perms *perm)
{
	u32 reserved;

	if (version != 1)
		return false;

	/* reserved entry is for later expansion, discard for now */
	return aa_unpack_u32(e, &reserved, NULL) &&
	       aa_unpack_u32(e, &perm->allow, NULL) &&
	       aa_unpack_u32(e, &perm->deny, NULL) &&
	       aa_unpack_u32(e, &perm->subtree, NULL) &&
	       aa_unpack_u32(e, &perm->cond, NULL) &&
	       aa_unpack_u32(e, &perm->kill, NULL) &&
	       aa_unpack_u32(e, &perm->complain, NULL) &&
	       aa_unpack_u32(e, &perm->prompt, NULL) &&
	       aa_unpack_u32(e, &perm->audit, NULL) &&
	       aa_unpack_u32(e, &perm->quiet, NULL) &&
	       aa_unpack_u32(e, &perm->hide, NULL) &&
	       aa_unpack_u32(e, &perm->xindex, NULL) &&
	       aa_unpack_u32(e, &perm->tag, NULL) &&
	       aa_unpack_u32(e, &perm->label, NULL);
}

static ssize_t unpack_perms_table(struct aa_ext *e, struct aa_perms **perms)
{
	void *pos = e->pos;
	u16 size = 0;

	AA_BUG(!perms);
	/*
	 * policy perms are optional, in which case perms are embedded
	 * in the dfa accept table
	 */
	if (aa_unpack_nameX(e, AA_STRUCT, "perms")) {
		int i;
		u32 version;

		if (!aa_unpack_u32(e, &version, "version"))
			goto fail_reset;
		if (!aa_unpack_array(e, NULL, &size))
			goto fail_reset;
		*perms = kzalloc_objs(struct aa_perms, size, GFP_KERNEL);
		if (!*perms) {
			e->pos = pos;
			return -ENOMEM;
		}
		for (i = 0; i < size; i++) {
			if (!unpack_perm(e, version, &(*perms)[i]))
				goto fail;
		}
		if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	} else
		*perms = NULL;

	return size;

fail:
	kfree(*perms);
fail_reset:
	e->pos = pos;
	return -EPROTO;
}
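/**
 * unpack_pdb - unpack a policy database (tags, perms table, dfa, xtable)
 * @e: serialized data extent information (NOT NULL)
 * @policy: Returns - the allocated policydb (NOT NULL)
 * @required_dfa: true if the dfa must be present
 * @required_trans: true if failure to unpack the transition table is fatal
 * @info: Returns - info message if unpacking fails (NOT NULL)
 *
 * Returns: 0 on success with *@policy set, else error with the read head
 *          reset
 */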
static int unpack_pdb(struct aa_ext *e, struct aa_policydb **policy,
		      bool required_dfa, bool required_trans,
		      const char **info)
{
	struct aa_policydb *pdb;
	void *pos = e->pos;
	int i, flags, error = -EPROTO;
	ssize_t size;
	u32 version = 0;

	pdb = aa_alloc_pdb(GFP_KERNEL);
	if (!pdb)
		return -ENOMEM;

	AA_DEBUG(DEBUG_UNPACK, "unpacking tags");
	if (unpack_tags(e, &pdb->tags, info) < 0)
		goto fail;
	AA_DEBUG(DEBUG_UNPACK, "done unpacking tags");

	size = unpack_perms_table(e, &pdb->perms);
	if (size < 0) {
		error = size;
		pdb->perms = NULL;
		*info = "failed to unpack - perms";
		goto fail;
	}
	pdb->size = size;

	if (pdb->perms) {
		/* perms table present accept is index */
		flags = TO_ACCEPT1_FLAG(YYTD_DATA32);
		if (aa_unpack_u32(e, &version, "permsv") && version > 2)
			/* accept2 used for dfa flags */
			flags |= TO_ACCEPT2_FLAG(YYTD_DATA32);
	} else {
		/* packed perms in accept1 and accept2 */
		flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
			TO_ACCEPT2_FLAG(YYTD_DATA32);
	}

	pdb->dfa = unpack_dfa(e, flags);
	if (IS_ERR(pdb->dfa)) {
		error = PTR_ERR(pdb->dfa);
		pdb->dfa = NULL;
		*info = "failed to unpack - dfa";
		goto fail;
	} else if (!pdb->dfa) {
		if (required_dfa) {
			*info = "missing required dfa";
			goto fail;
		}
	} else {
		/*
		 * only unpack the following if a dfa is present
		 *
		 * sadly start was given different names for file and policydb
		 * but since it is optional we can try both
		 */
		if (!aa_unpack_u32(e, &pdb->start[0], "start"))
			/* default start state */
			pdb->start[0] = DFA_START;
		if (!aa_unpack_u32(e, &pdb->start[AA_CLASS_FILE], "dfa_start")) {
			/* default start state for xmatch and file dfa */
			pdb->start[AA_CLASS_FILE] = DFA_START;
		} /* setup class index */
		for (i = AA_CLASS_FILE + 1; i <= AA_CLASS_LAST; i++) {
			pdb->start[i] = aa_dfa_next(pdb->dfa, pdb->start[0],
						    i);
		}
	}

	/* accept2 is in some cases being allocated, even with perms */
	if (pdb->perms && !pdb->dfa->tables[YYTD_ID_ACCEPT2]) {
		/* add dfa flags table missing in v2 */
		u32 noents = pdb->dfa->tables[YYTD_ID_ACCEPT]->td_lolen;
		u16 tdflags = pdb->dfa->tables[YYTD_ID_ACCEPT]->td_flags;
		size_t tsize = table_size(noents, tdflags);

		pdb->dfa->tables[YYTD_ID_ACCEPT2] = kvzalloc(tsize, GFP_KERNEL);
		if (!pdb->dfa->tables[YYTD_ID_ACCEPT2]) {
			*info = "failed to alloc dfa flags table";
			goto out;
		}
		pdb->dfa->tables[YYTD_ID_ACCEPT2]->td_lolen = noents;
		pdb->dfa->tables[YYTD_ID_ACCEPT2]->td_flags = tdflags;
	}
	/*
	 * Unfortunately due to a bug in earlier userspaces, a
	 * transition table may be present even when the dfa is
	 * not. For compatibility reasons unpack and discard.
	 */
	error = unpack_strs_table(e, "xtable", false, &pdb->trans);
	if (error && required_trans) {
		*info = "failed to unpack profile transition table";
		goto fail;
	}

	if (!pdb->dfa && pdb->trans.table)
		aa_destroy_str_table(&pdb->trans);

	/* TODO:
	 * - move compat mapping here, requires dfa merging first
	 * - move verify here, it has to be done after compat mappings
	 * - move free of unneeded trans table here, has to be done
	 *   after perm mapping.
	 */
out:
	*policy = pdb;
	return 0;

fail:
	aa_put_pdb(pdb);
	e->pos = pos;
	return error;
}

static u32 strhash(const void *data, u32 len, u32 seed)
{
	const char * const *key = data;

	return jhash(*key, strlen(*key), seed);
}

static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
{
	const struct aa_data *data = obj;
	const char * const *key = arg->key;

	return strcmp(data->key, *key);
}

/**
 * unpack_profile - unpack a serialized profile
 * @e: serialized data extent information (NOT NULL)
 * @ns_name: Returns - pointer to a newly allocated copy of the ns name,
 *           %NULL in case of error (NOT NULL)
 *
 * NOTE: unpack profile sets audit struct if there is a failure
 */
static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
{
	struct aa_ruleset *rules;
	struct aa_profile *profile = NULL;
	const char *tmpname, *tmpns = NULL, *name = NULL;
	const char *info = "failed to unpack profile";
	size_t ns_len;
	struct rhashtable_params params = { 0 };
	char *key = NULL, *disconnected = NULL;
	struct aa_data *data;
	int error = -EPROTO;
	kernel_cap_t tmpcap;
	u32 tmp;

	*ns_name = NULL;

	/* check that we have the right struct being passed */
	if (!aa_unpack_nameX(e, AA_STRUCT, "profile"))
		goto fail;
	if (!aa_unpack_str(e, &name, NULL))
		goto fail;
	if (*name == '\0')
		goto fail;

	tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
	if (tmpns) {
		if (!tmpname) {
			info = "empty profile name";
			goto fail;
		}
		*ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
		if (!*ns_name) {
			info = "out of memory";
			error = -ENOMEM;
			goto fail;
		}
		name = tmpname;
	}

	profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
	if (!profile) {
		info = "out of memory";
		error = -ENOMEM;
		goto fail;
	}
	rules = profile->label.rules[0];

	/* profile renaming is optional */
	(void) aa_unpack_str(e, &profile->rename, "rename");

	/* attachment string is optional */
	(void) aa_unpack_str(e, &profile->attach.xmatch_str, "attach");

	/* xmatch is optional and may be NULL */
	error = unpack_pdb(e, &profile->attach.xmatch, false, false, &info);
	if (error) {
		info = "bad xmatch";
		goto fail;
	}

	/* neither xmatch_len nor xmatch_perms is optional if xmatch is set */
	if (profile->attach.xmatch->dfa) {
		if (!aa_unpack_u32(e, &tmp, NULL)) {
			info = "missing xmatch len";
			goto fail;
		}
		profile->attach.xmatch_len = tmp;
		profile->attach.xmatch->start[AA_CLASS_XMATCH] = DFA_START;
		if (!profile->attach.xmatch->perms) {
			error = aa_compat_map_xmatch(profile->attach.xmatch);
			if (error) {
				info = "failed to convert xmatch permission table";
				goto fail;
			}
		}
	}

	/* disconnected attachment string is optional */
	(void) aa_unpack_strdup(e, &disconnected, "disconnected");
	profile->disconnected = disconnected;

	/* optional */
	(void) aa_unpack_u32(e, &profile->signal, "kill");
	if (profile->signal < 1 || profile->signal > MAXMAPPED_SIG) {
		info = "profile kill.signal invalid value";
		goto fail;
	}
	/* per profile debug flags (complain, audit) */
	if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) {
		info = "profile missing flags";
		goto fail;
	}
	info = "failed to unpack profile flags";
	if (!aa_unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp & PACKED_FLAG_HAT)
		profile->label.flags |= FLAG_HAT;
	if (tmp & PACKED_FLAG_DEBUG1)
		profile->label.flags |= FLAG_DEBUG1;
	if (tmp & PACKED_FLAG_DEBUG2)
		profile->label.flags |= FLAG_DEBUG2;
	if (!aa_unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) {
		profile->mode = APPARMOR_COMPLAIN;
	} else if (tmp == PACKED_MODE_ENFORCE) {
		profile->mode = APPARMOR_ENFORCE;
	} else if (tmp == PACKED_MODE_KILL) {
		profile->mode = APPARMOR_KILL;
	} else if (tmp == PACKED_MODE_UNCONFINED) {
		profile->mode = APPARMOR_UNCONFINED;
		profile->label.flags |= FLAG_UNCONFINED;
	} else if (tmp == PACKED_MODE_USER) {
		profile->mode = APPARMOR_USER;
	} else {
		goto fail;
	}
	if (!aa_unpack_u32(e, &tmp, NULL))
		goto fail;
	if (tmp)
		profile->audit = AUDIT_ALL;

	if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
		goto fail;

	/* path_flags is optional */
	if (aa_unpack_u32(e, &profile->path_flags, "path_flags"))
		profile->path_flags |= profile->label.flags &
			PATH_MEDIATE_DELETED;
	else
		/* set a default value if path_flags field is not present */
		profile->path_flags = PATH_MEDIATE_DELETED;

	info = "failed to unpack profile capabilities";
	if (!aa_unpack_cap_low(e, &rules->caps.allow, NULL))
		goto fail;
	if (!aa_unpack_cap_low(e, &rules->caps.audit, NULL))
		goto fail;
	if (!aa_unpack_cap_low(e, &rules->caps.quiet, NULL))
		goto fail;
	if (!aa_unpack_cap_low(e, &tmpcap, NULL))
		goto fail;

	info = "failed to unpack upper profile capabilities";
	if (aa_unpack_nameX(e, AA_STRUCT, "caps64")) {
		/* optional upper half of 64 bit caps */
		if (!aa_unpack_cap_high(e, &rules->caps.allow, NULL))
			goto fail;
		if (!aa_unpack_cap_high(e, &rules->caps.audit, NULL))
			goto fail;
		if (!aa_unpack_cap_high(e, &rules->caps.quiet, NULL))
			goto fail;
		if (!aa_unpack_cap_high(e, &tmpcap, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	info = "failed to unpack extended profile capabilities";
	if (aa_unpack_nameX(e, AA_STRUCT, "capsx")) {
		/* optional extended caps mediation mask */
		if (!aa_unpack_cap_low(e, &rules->caps.extended, NULL))
			goto fail;
		if (!aa_unpack_cap_high(e, &rules->caps.extended, NULL))
			goto fail;
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
	}

	if (!unpack_xattrs(e, profile)) {
		info = "failed to unpack profile xattrs";
		goto fail;
	}

	if (!unpack_rlimits(e, rules)) {
		info = "failed to unpack profile rlimits";
		goto fail;
	}

	if (!unpack_secmark(e, rules)) {
		info = "failed to unpack profile secmark rules";
		goto fail;
	}

	if (aa_unpack_nameX(e, AA_STRUCT, "policydb")) {
		/* generic policy dfa - optional and may be NULL */
		info = "failed to unpack policydb";
		error = unpack_pdb(e, &rules->policy, true, false,
				   &info);
		if (error)
			goto fail;
		/* Fixup: drop when we get rid of start array */
		if (aa_dfa_next(rules->policy->dfa, rules->policy->start[0],
				AA_CLASS_FILE))
			rules->policy->start[AA_CLASS_FILE] =
				aa_dfa_next(rules->policy->dfa,
					    rules->policy->start[0],
					    AA_CLASS_FILE);
		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
			goto fail;
		if (!rules->policy->perms) {
			error = aa_compat_map_policy(rules->policy,
						     e->version);
			if (error) {
				info = "failed to remap policydb permission table";
				goto fail;
			}
		}
	} else {
		rules->policy = aa_get_pdb(nullpdb);
	}
	/* get file rules */
	error = unpack_pdb(e, &rules->file, false, true, &info);
	if (error) {
		goto fail;
	} else if (rules->file->dfa) {
		if (!rules->file->perms) {
			AA_DEBUG(DEBUG_UNPACK, "compat mapping perms");
			error = aa_compat_map_file(rules->file);
			if (error) {
				info = "failed to remap file permission table";
				goto fail;
			}
		}
	} else if (rules->policy->dfa &&
		   rules->policy->start[AA_CLASS_FILE]) {
		aa_put_pdb(rules->file);
		rules->file = aa_get_pdb(rules->policy);
	} else {
		aa_put_pdb(rules->file);
		rules->file = aa_get_pdb(nullpdb);
	}
	error = -EPROTO;
	if (aa_unpack_nameX(e, AA_STRUCT, "data")) {
		info = "out of memory";
		profile->data = kzalloc_obj(*profile->data, GFP_KERNEL);
		if (!profile->data) {
			error = -ENOMEM;
			goto fail;
		}
		params.nelem_hint = 3;
		params.key_len = sizeof(void *);
		params.key_offset = offsetof(struct aa_data, key);
		params.head_offset = offsetof(struct aa_data, head);
		params.hashfn = strhash;
		params.obj_cmpfn = datacmp;

		if (rhashtable_init(profile->data, &params)) {
			info = "failed to init key, value hash table";
			goto fail;
		}

		while (aa_unpack_strdup(e, &key, NULL)) {
			data = kzalloc_obj(*data, GFP_KERNEL);
			if (!data) {
				kfree_sensitive(key);
				error = -ENOMEM;
				goto fail;
			}

			data->key = key;
			data->size = aa_unpack_blob(e, &data->data, NULL);
			data->data = kvmemdup(data->data, data->size, GFP_KERNEL);
			if (data->size && !data->data) {
				kfree_sensitive(data->key);
				kfree_sensitive(data);
				error = -ENOMEM;
				goto fail;
			}

			if (rhashtable_insert_fast(profile->data, &data->head,
						   profile->data->p)) {
				kvfree_sensitive(data->data, data->size);
				kfree_sensitive(data->key);
				kfree_sensitive(data);
				info = "failed to insert data to table";
				goto fail;
			}
		}

		if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
			info = "failed to unpack end of key, value data table";
			goto fail;
		}
	}

	if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
		info = "failed to unpack end of profile";
		goto fail;
	}

	aa_compute_profile_mediates(profile);

	return profile;

fail:
	if (error == 0)
		/* default error covers most cases */
		error = -EPROTO;
	if (*ns_name) {
		kfree(*ns_name);
		*ns_name = NULL;
	}
	if (profile)
		name = NULL;
	else if (!name)
		name = "unknown";
	audit_iface(profile, NULL, name, info, e, error);
	aa_free_profile(profile);

	return ERR_PTR(error);
}

/**
 * verify_header - unpack serialized stream header
 * @e: serialized data read head (NOT NULL)
 * @required: whether the header is required or optional
 * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
 *
 * Returns: error or 0 if header is good
 */
static int verify_header(struct aa_ext *e, int required, const char **ns)
{
	int error = -EPROTONOSUPPORT;
	const char *name = NULL;
	*ns = NULL;
	/* get the interface version */
	if (!aa_unpack_u32(e, &e->version, "version")) {
		if (required) {
			audit_iface(NULL, NULL, NULL, "invalid profile format",
				    e, error);
			return error;
		}
	}

	/* Check that the interface version is currently supported.
	 * if not specified use previous version
	 * Mask off everything that is not kernel abi version
	 */
	if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v9)) {
		audit_iface(NULL, NULL, NULL, "unsupported interface version",
			    e, error);
		return error;
	}

	/* read the namespace if present */
	if (aa_unpack_str(e, &name, "namespace")) {
		if (*name == '\0') {
			audit_iface(NULL, NULL, NULL, "invalid namespace name",
				    e, error);
			return error;
		}
		if (*ns && strcmp(*ns, name)) {
			audit_iface(NULL, NULL, NULL, "invalid ns change", e,
				    error);
		} else if (!*ns) {
			*ns = kstrdup(name, GFP_KERNEL);
			if (!*ns)
				return -ENOMEM;
		}
	}

	return 0;
}

/**
 * verify_dfa_accept_index - verify accept indexes are in range of perms table
 * @dfa: the dfa to check accept indexes are in range
 * @table_size: the permission table size the indexes should be within
 */
static bool verify_dfa_accept_index(struct aa_dfa *dfa, int table_size)
{
	int i;

	for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
		if (ACCEPT_TABLE(dfa)[i] >= table_size)
			return false;
	}
	return true;
}

static bool verify_perm(struct aa_perms *perm)
{
	/* TODO: allow option to just force the perms into a valid state */
	if (perm->allow & perm->deny)
		return false;
	if (perm->subtree & ~perm->allow)
		return false;
	if (perm->cond & (perm->allow | perm->deny))
		return false;
	if (perm->kill & perm->allow)
		return false;
	if (perm->complain & (perm->allow | perm->deny))
		return false;
	if (perm->prompt & (perm->allow | perm->deny))
		return false;
	if (perm->complain & perm->prompt)
		return false;
	if (perm->hide & perm->allow)
		return false;

	return true;
}

static bool verify_perms(struct aa_policydb *pdb)
{
	int i;
	int xidx, xmax = -1;

	for (i = 0; i < pdb->size; i++) {
		if (!verify_perm(&pdb->perms[i]))
			return false;
		/* verify indexes into str table */
		if ((pdb->perms[i].xindex & AA_X_TYPE_MASK) == AA_X_TABLE) {
			xidx = pdb->perms[i].xindex & AA_X_INDEX_MASK;
			if (xidx >= pdb->trans.size)
				return false;
			if (xmax < xidx)
				xmax = xidx;
		}
		if (pdb->perms[i].tag && pdb->perms[i].tag >= pdb->tags.sets.size)
			return false;
		if (pdb->perms[i].label &&
		    pdb->perms[i].label >= pdb->trans.size)
			return false;
	}
	/* deal with incorrectly constructed string tables */
	if (xmax == -1) {
		aa_destroy_str_table(&pdb->trans);
	} else if (pdb->trans.size > xmax + 1) {
		if (!aa_resize_str_table(&pdb->trans, xmax + 1, GFP_KERNEL))
			return false;
	}
	return true;
}

/**
 * verify_profile - Do post unpack analysis to verify profile consistency
 * @profile: profile to verify (NOT NULL)
 *
 * Returns: 0 if passes verification else error
 *
 * This verification is post any unpack mapping or changes
 */
static int verify_profile(struct aa_profile *profile)
{
	struct aa_ruleset *rules = profile->label.rules[0];

	if (!rules)
		return 0;

	if (rules->file->dfa && !verify_dfa_accept_index(rules->file->dfa,
							 rules->file->size)) {
		audit_iface(profile, NULL, NULL,
			    "Unpack: file Invalid named transition", NULL,
			    -EPROTO);
		return -EPROTO;
	}
	if (rules->policy->dfa &&
	    !verify_dfa_accept_index(rules->policy->dfa, rules->policy->size)) {
		audit_iface(profile, NULL, NULL,
			    "Unpack: policy Invalid named transition", NULL,
			    -EPROTO);
		return -EPROTO;
	}

	if (!verify_perms(rules->file)) {
		audit_iface(profile, NULL, NULL,
			    "Unpack: Invalid perm index", NULL, -EPROTO);
		return -EPROTO;
	}
	if (!verify_perms(rules->policy)) {
		audit_iface(profile, NULL, NULL,
			    "Unpack: Invalid perm index", NULL, -EPROTO);
		return -EPROTO;
	}
	if (!verify_perms(profile->attach.xmatch)) {
		audit_iface(profile, NULL, NULL,
			    "Unpack: Invalid perm index", NULL, -EPROTO);
		return -EPROTO;
	}

	return 0;
}

void aa_load_ent_free(struct aa_load_ent *ent)
{
	if (ent) {
		aa_put_profile(ent->rename);
		aa_put_profile(ent->old);
		aa_put_profile(ent->new);
		kfree(ent->ns_name);
		kfree_sensitive(ent);
	}
}

struct aa_load_ent *aa_load_ent_alloc(void)
{
	struct aa_load_ent *ent = kzalloc_obj(*ent, GFP_KERNEL);

	if (ent)
		INIT_LIST_HEAD(&ent->list);
	return ent;
}
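/**
 * compress_zstd - compress a raw policy blob with zstd
 * @src: data to compress (NOT NULL)
 * @slen: length of @src
 * @dst: Returns - buffer holding the compressed data on success
 * @dlen: Returns - length of the compressed data
 *
 * Returns: 0 on success, else error. When binary export is disabled the
 *          data is left uncompressed and @dlen is set to @slen.
 */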
static int compress_zstd(const char *src, size_t slen, char **dst, size_t *dlen)
{
#ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
	const zstd_parameters params =
		zstd_get_params(aa_g_rawdata_compression_level, slen);
	const size_t wksp_len = zstd_cctx_workspace_bound(&params.cParams);
	void *wksp = NULL;
	zstd_cctx *ctx = NULL;
	size_t out_len = zstd_compress_bound(slen);
	void *out = NULL;
	int ret = 0;

	out = kvzalloc(out_len, GFP_KERNEL);
	if (!out) {
		ret = -ENOMEM;
		goto cleanup;
	}

	wksp = kvzalloc(wksp_len, GFP_KERNEL);
	if (!wksp) {
		ret = -ENOMEM;
		goto cleanup;
	}

	ctx = zstd_init_cctx(wksp, wksp_len);
	if (!ctx) {
		ret = -EINVAL;
		goto cleanup;
	}

	out_len = zstd_compress_cctx(ctx, out, out_len, src, slen, &params);
	if (zstd_is_error(out_len) || out_len >= slen) {
		ret = -EINVAL;
		goto cleanup;
	}

	if (is_vmalloc_addr(out)) {
		*dst = kvzalloc(out_len, GFP_KERNEL);
		if (*dst) {
			memcpy(*dst, out, out_len);
			kvfree(out);
			out = NULL;
		}
	} else {
		/*
		 * If the staging buffer was kmalloc'd, then using krealloc is
		 * probably going to be faster. The destination buffer will
		 * always be smaller, so it's just shrunk, avoiding a memcpy
		 */
		*dst = krealloc(out, out_len, GFP_KERNEL);
	}

	if (!*dst) {
		ret = -ENOMEM;
		goto cleanup;
	}

	*dlen = out_len;

cleanup:
	if (ret) {
		kvfree(out);
		*dst = NULL;
	}

	kvfree(wksp);
	return ret;
#else
	*dlen = slen;
	return 0;
#endif
}

static int compress_loaddata(struct aa_loaddata *data)
{
	AA_BUG(data->compressed_size > 0);

	/*
	 * Shortcut the no compression case, else we increase the amount of
	 * storage required by a small amount
	 */
	if (aa_g_rawdata_compression_level != 0) {
		void *udata = data->data;
		int error = compress_zstd(udata, data->size, &data->data,
					  &data->compressed_size);
		if (error) {
			data->compressed_size = data->size;
			return error;
		}
		if (udata != data->data)
			kvfree(udata);
	} else
		data->compressed_size = data->size;

	return 0;
}

/**
 * aa_unpack - unpack packed binary profile(s) data loaded from user space
 * @udata: user data copied to kmem (NOT NULL)
 * @lh: list to place unpacked profiles in an aa_repl_ws
 * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
 *
 * Unpack user data and return refcounted allocated profile(s) stored in
 * @lh in order of discovery, with the list chain stored in base.list.
 *
 * Returns: 0 on success with profile(s) on @lh, else error
 */
int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
	      const char **ns)
{
	struct aa_load_ent *tmp, *ent;
	struct aa_profile *profile = NULL;
	char *ns_name = NULL;
	int error;
	struct aa_ext e = {
		.start = udata->data,
		.end = udata->data + udata->size,
		.pos = udata->data,
	};

	*ns = NULL;
	while (e.pos < e.end) {
		void *start;

		error = verify_header(&e, e.pos == e.start, ns);
		if (error)
			goto fail;

		start = e.pos;
		profile = unpack_profile(&e, &ns_name);
		if (IS_ERR(profile)) {
			error = PTR_ERR(profile);
			goto fail;
		}

		error = verify_profile(profile);
		if (error)
			goto fail_profile;

		if (aa_g_hash_policy)
			error = aa_calc_profile_hash(profile, e.version, start,
						     e.pos - start);
		if (error)
			goto fail_profile;

		ent = aa_load_ent_alloc();
		if (!ent) {
			error = -ENOMEM;
			goto fail_profile;
		}

		ent->new = profile;
		ent->ns_name = ns_name;
		ns_name = NULL;
		list_add_tail(&ent->list, lh);
	}
	udata->abi = e.version & K_ABI_MASK;
	if (aa_g_hash_policy) {
		udata->hash = aa_calc_hash(udata->data, udata->size);
		if (IS_ERR(udata->hash)) {
			error = PTR_ERR(udata->hash);
			udata->hash = NULL;
			goto fail;
		}
	}

	if (aa_g_export_binary) {
		error = compress_loaddata(udata);
		if (error)
			goto fail;
	}
	return 0;

fail_profile:
	kfree(ns_name);
	aa_put_profile(profile);

fail:
	list_for_each_entry_safe(ent, tmp, lh, list) {
		list_del_init(&ent->list);
		aa_load_ent_free(ent);
	}

	return error;
}