// SPDX-License-Identifier: GPL-2.0-only
/*
 * AppArmor security module
 *
 * This file contains AppArmor security identifier (secid) manipulation fns
 *
 * Copyright 2009-2017 Canonical Ltd.
 *
 * AppArmor allocates a unique secid for every label used. If a label
 * is replaced it receives the secid of the label it is replacing.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

#include "include/cred.h"
#include "include/lib.h"
#include "include/secid.h"
#include "include/label.h"
#include "include/policy_ns.h"

/*
 * secids - do not pin labels with a refcount. They rely on the label
 * properly updating/freeing them
 */
#define AA_FIRST_SECID 2

static DEFINE_XARRAY_FLAGS(aa_secids, XA_FLAGS_LOCK_IRQ | XA_FLAGS_TRACK_FREE);

/* when set, include the label's confinement mode in secctx output */
int apparmor_display_secid_mode;

/*
 * TODO: allow policy to reserve a secid range?
 * TODO: add secid pinning
 * TODO: use secid_update in label replace
 */

/**
 * aa_secid_update - update a secid mapping to a new label
 * @secid: secid to update
 * @label: label the secid will now map to
 */
void aa_secid_update(u32 secid, struct aa_label *label)
{
	unsigned long flags;

	xa_lock_irqsave(&aa_secids, flags);
	__xa_store(&aa_secids, secid, label, 0);
	xa_unlock_irqrestore(&aa_secids, flags);
}

/*
 * aa_secid_to_label - look up the label a secid currently maps to
 *
 * see label for inverse aa_label_to_secid
 */
struct aa_label *aa_secid_to_label(u32 secid)
{
	return xa_load(&aa_secids, secid);
}

/*
 * apparmor_label_to_secctx - common helper to convert a label to a secctx
 * @label: label to convert (may be NULL, in which case -EINVAL is returned)
 * @secdata: where to return the allocated secctx string, or NULL to only
 *	     compute the length
 * @seclen: where to return the secctx length (NOT NULL)
 */
static int apparmor_label_to_secctx(struct aa_label *label, char **secdata,
				    u32 *seclen)
{
	/* TODO: cache secctx and ref count so we don't have to recreate */
	int flags = FLAG_VIEW_SUBNS | FLAG_HIDDEN_UNCONFINED | FLAG_ABS_ROOT;
	int len;

	AA_BUG(!seclen);

	if (!label)
		return -EINVAL;

	if (apparmor_display_secid_mode)
		flags |= FLAG_SHOW_MODE;

	if (secdata)
		len = aa_label_asxprint(secdata, root_ns, label,
					flags, GFP_ATOMIC);
	else
		len = aa_label_snxprint(NULL, 0, root_ns, label, flags);

	if (len < 0)
		return -ENOMEM;

	*seclen = len;

	return 0;
}

/**
 * apparmor_secid_to_secctx - convert a secid to its secctx string
 * @secid: secid to convert
 * @secdata: where to return the allocated secctx string (may be NULL)
 * @seclen: where to return the length of the secctx
 *
 * Returns: 0 on success, -EINVAL or -ENOMEM on failure
 */
int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
	struct aa_label *label = aa_secid_to_label(secid);

	return apparmor_label_to_secctx(label, secdata, seclen);
}

/**
 * apparmor_lsmprop_to_secctx - convert an lsm_prop to its secctx string
 * @prop: lsm_prop containing the label to convert
 * @secdata: where to return the allocated secctx string (may be NULL)
 * @seclen: where to return the length of the secctx
 *
 * Returns: 0 on success, -EINVAL or -ENOMEM on failure
 */
int apparmor_lsmprop_to_secctx(struct lsm_prop *prop, char **secdata,
			       u32 *seclen)
{
	struct aa_label *label;

	label = prop->apparmor.label;

	return apparmor_label_to_secctx(label, secdata, seclen);
}

/**
 * apparmor_secctx_to_secid - convert a secctx string to a secid
 * @secdata: secctx string to parse
 * @seclen: length of @secdata
 * @secid: where to return the secid of the parsed label
 *
 * Returns: 0 on success, error if @secdata cannot be parsed to a label
 */
int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
	struct aa_label *label;

	label = aa_label_strn_parse(&root_ns->unconfined->label, secdata,
				    seclen, GFP_KERNEL, false, false);
	if (IS_ERR(label))
		return PTR_ERR(label);
	*secid = label->secid;

	return 0;
}

/**
 * apparmor_release_secctx - free a secctx returned by *_to_secctx
 * @secdata: secctx string to free
 * @seclen: length of @secdata (unused)
 */
void apparmor_release_secctx(char *secdata, u32 seclen)
{
	kfree(secdata);
}

/**
 * aa_alloc_secid - allocate a new secid for a profile
 * @label: the label to allocate a secid for
 * @gfp: memory allocation flags
 *
 * Returns: 0 with @label->secid initialized
 *          <0 returns error with @label->secid set to AA_SECID_INVALID
 */
int aa_alloc_secid(struct aa_label *label, gfp_t gfp)
{
	unsigned long flags;
	int ret;

	xa_lock_irqsave(&aa_secids, flags);
	ret = __xa_alloc(&aa_secids, &label->secid, label,
			 XA_LIMIT(AA_FIRST_SECID, INT_MAX), gfp);
	xa_unlock_irqrestore(&aa_secids, flags);

	if (ret < 0) {
		label->secid = AA_SECID_INVALID;
		return ret;
	}

	return 0;
}

/**
 * aa_free_secid - free a secid
 * @secid: secid to free
 */
void aa_free_secid(u32 secid)
{
	unsigned long flags;

	xa_lock_irqsave(&aa_secids, flags);
	__xa_erase(&aa_secids, secid);
	xa_unlock_irqrestore(&aa_secids, flags);
}
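
/*
 * Illustrative sketch, not part of the file proper: one way a secid could
 * be turned into a secctx string and released again using the functions
 * above.  In-tree consumers normally reach these through the LSM hook
 * wrappers (security_secid_to_secctx()/security_release_secctx()); the
 * direct calls, the audit buffer @ab, and the variable names below are
 * assumptions made purely for illustration.
 *
 *	char *ctx;
 *	u32 len;
 *
 *	if (!apparmor_secid_to_secctx(secid, &ctx, &len)) {
 *		audit_log_format(ab, " subj=%s", ctx);
 *		apparmor_release_secctx(ctx, len);
 *	}
 */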