// SPDX-License-Identifier: GPL-2.0-only
/*
 * AppArmor security module
 *
 * This file contains AppArmor security identifier (secid) manipulation fns
 *
 * Copyright 2009-2017 Canonical Ltd.
 *
 * AppArmor allocates a unique secid for every label used. If a label
 * is replaced it receives the secid of the label it is replacing.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

#include "include/cred.h"
#include "include/lib.h"
#include "include/secid.h"
#include "include/label.h"
#include "include/policy_ns.h"

/*
 * secids do not pin labels with a refcount; they rely on the label
 * properly updating/freeing them
 */
#define AA_FIRST_SECID 2

static DEFINE_XARRAY_FLAGS(aa_secids, XA_FLAGS_LOCK_IRQ | XA_FLAGS_TRACK_FREE);

int apparmor_display_secid_mode;

/*
 * TODO: allow policy to reserve a secid range?
 * TODO: add secid pinning
 * TODO: use secid_update in label replace
 */

/**
 * aa_secid_update - update a secid mapping to a new label
 * @secid: secid to update
 * @label: label the secid will now map to
 */
void aa_secid_update(u32 secid, struct aa_label *label)
{
	unsigned long flags;

	xa_lock_irqsave(&aa_secids, flags);
	__xa_store(&aa_secids, secid, label, 0);
	xa_unlock_irqrestore(&aa_secids, flags);
}

/**
 * aa_secid_to_label - retrieve existing label associated with secid
 * @secid: secid to look up
 *
 * Returns: unrefcounted label ptr OR NULL if none exists
 *
 * see label for inverse aa_label_to_secid
 */
struct aa_label *aa_secid_to_label(u32 secid)
{
	return xa_load(&aa_secids, secid);
}

int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
	/* TODO: cache secctx and ref count so we don't have to recreate */
	struct aa_label *label = aa_secid_to_label(secid);
	int flags = FLAG_VIEW_SUBNS | FLAG_HIDDEN_UNCONFINED | FLAG_ABS_ROOT;
	int len;

	AA_BUG(!seclen);

	if (!label)
		return -EINVAL;

	if (apparmor_display_secid_mode)
		flags |= FLAG_SHOW_MODE;

	if (secdata)
		len = aa_label_asxprint(secdata, root_ns, label,
					flags, GFP_ATOMIC);
	else
		len = aa_label_snxprint(NULL, 0, root_ns, label, flags);

	if (len < 0)
		return -ENOMEM;

	*seclen = len;

	return 0;
}

int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
	struct aa_label *label;

	label = aa_label_strn_parse(&root_ns->unconfined->label, secdata,
				    seclen, GFP_KERNEL, false, false);
	if (IS_ERR(label))
		return PTR_ERR(label);
	*secid = label->secid;

	return 0;
}

void apparmor_release_secctx(char *secdata, u32 seclen)
{
	kfree(secdata);
}
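/*
 * Illustrative sketch (not part of the upstream file): how the three LSM
 * secctx hooks above are expected to compose. The helper name and the
 * assumption that @secid currently maps to a live label are hypothetical;
 * real callers are the LSM infrastructure, not this sketch.
 */
static int __maybe_unused aa_secctx_roundtrip_sketch(u32 secid)
{
	char *secdata = NULL;
	u32 seclen, parsed_secid;
	int error;

	/* print the label mapped to @secid; fails if the mapping is gone */
	error = apparmor_secid_to_secctx(secid, &secdata, &seclen);
	if (error)
		return error;

	/* parse the context string back to a label and read off its secid */
	error = apparmor_secctx_to_secid(secdata, seclen, &parsed_secid);

	/* the context string must always be released by the caller */
	apparmor_release_secctx(secdata, seclen);

	return error;
}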
/**
 * aa_alloc_secid - allocate a new secid for a label
 * @label: the label to allocate a secid for
 * @gfp: memory allocation flags
 *
 * Returns: 0 with @label->secid initialized
 *          <0 returns error with @label->secid set to AA_SECID_INVALID
 */
int aa_alloc_secid(struct aa_label *label, gfp_t gfp)
{
	unsigned long flags;
	int ret;

	xa_lock_irqsave(&aa_secids, flags);
	ret = __xa_alloc(&aa_secids, &label->secid, label,
			 XA_LIMIT(AA_FIRST_SECID, INT_MAX), gfp);
	xa_unlock_irqrestore(&aa_secids, flags);

	if (ret < 0) {
		label->secid = AA_SECID_INVALID;
		return ret;
	}

	return 0;
}

/**
 * aa_free_secid - free a secid
 * @secid: secid to free
 */
void aa_free_secid(u32 secid)
{
	unsigned long flags;

	xa_lock_irqsave(&aa_secids, flags);
	__xa_erase(&aa_secids, secid);
	xa_unlock_irqrestore(&aa_secids, flags);
}
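/*
 * Illustrative sketch (not part of the upstream file): the secid lifecycle
 * described in the header comment - allocate on label creation, look up
 * without pinning, hand a secid over on label replacement, and erase the
 * mapping when the owning label goes away. The helper name and both label
 * arguments are hypothetical; in practice these calls are spread across the
 * label lifetime code rather than made back to back like this.
 */
static void __maybe_unused aa_secid_lifecycle_sketch(struct aa_label *old,
						     struct aa_label *new)
{
	/* reserve a fresh secid for @new and map it to the label */
	if (aa_alloc_secid(new, GFP_KERNEL))
		return;		/* @new->secid is AA_SECID_INVALID on failure */

	/* lookups return the label without taking a reference */
	AA_BUG(aa_secid_to_label(new->secid) != new);

	/* on replacement the old secid is pointed at the replacing label */
	aa_secid_update(old->secid, new);

	/* drop the mapping when the label that owns the secid is destroyed */
	aa_free_secid(new->secid);
}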