/*
 * AppArmor security module
 *
 * This file contains AppArmor security identifier (secid) manipulation
 * functions.
 *
 * Copyright 2009-2017 Canonical Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 *
 * AppArmor allocates a unique secid for every label used. If a label
 * is replaced it receives the secid of the label it is replacing.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "include/cred.h"
#include "include/lib.h"
#include "include/secid.h"
#include "include/label.h"
#include "include/policy_ns.h"

/*
 * secids - do not pin labels with a refcount. They rely on the label
 * being properly updated/freed.
 *
 * A singly linked free list is used to track secids that have been
 * freed, so they can be reused before new ones are allocated.
 */

#define FREE_LIST_HEAD 1

static RADIX_TREE(aa_secids_map, GFP_ATOMIC);
static DEFINE_SPINLOCK(secid_lock);
static u32 alloced_secid = FREE_LIST_HEAD;
static u32 free_list = FREE_LIST_HEAD;
static unsigned long free_count;

/*
 * TODO: allow policy to reserve a secid range?
 * TODO: add secid pinning
 * TODO: use secid_update in label replace
 */

#define SECID_MAX U32_MAX

/* TODO: mark free list as exceptional */
static void *to_ptr(u32 secid)
{
	return (void *)
		(((unsigned long) secid) << RADIX_TREE_EXCEPTIONAL_SHIFT);
}

static u32 to_secid(void *ptr)
{
	return (u32) (((unsigned long) ptr) >> RADIX_TREE_EXCEPTIONAL_SHIFT);
}

/* TODO: tag free_list entries to mark them as different */
/* __pop - reuse a freed secid for @label; requires secid_lock be held */
static u32 __pop(struct aa_label *label)
{
	u32 secid = free_list;
	void __rcu **slot;
	void *entry;

	if (free_list == FREE_LIST_HEAD)
		return AA_SECID_INVALID;

	slot = radix_tree_lookup_slot(&aa_secids_map, secid);
	AA_BUG(!slot);
	entry = radix_tree_deref_slot_protected(slot, &secid_lock);
	free_list = to_secid(entry);
	radix_tree_replace_slot(&aa_secids_map, slot, label);
	free_count--;

	return secid;
}

/* __push - return @secid to the free list; requires secid_lock be held */
static void __push(u32 secid)
{
	void __rcu **slot;

	slot = radix_tree_lookup_slot(&aa_secids_map, secid);
	AA_BUG(!slot);
	radix_tree_replace_slot(&aa_secids_map, slot, to_ptr(free_list));
	free_list = secid;
	free_count++;
}

/* __secid_update - remap @secid to @label; requires secid_lock be held */
static struct aa_label *__secid_update(u32 secid, struct aa_label *label)
{
	struct aa_label *old;
	void __rcu **slot;

	slot = radix_tree_lookup_slot(&aa_secids_map, secid);
	AA_BUG(!slot);
	old = radix_tree_deref_slot_protected(slot, &secid_lock);
	radix_tree_replace_slot(&aa_secids_map, slot, label);

	return old;
}

/**
 * aa_secid_update - update a secid mapping to a new label
 * @secid: secid to update
 * @label: label the secid will now map to
 */
void aa_secid_update(u32 secid, struct aa_label *label)
{
	struct aa_label *old;
	unsigned long flags;

	spin_lock_irqsave(&secid_lock, flags);
	old = __secid_update(secid, label);
	spin_unlock_irqrestore(&secid_lock, flags);
}
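/*
 * Illustrative sketch (compiled out): how a caller might tie the secid
 * map to a label's lifecycle, per the file header comment that a
 * replacement label inherits the secid of the label it replaces. The
 * helper names and the -ENOSPC error choice are assumptions for
 * illustration only; aa_alloc_secid(), aa_secid_update(),
 * aa_free_secid() and AA_SECID_INVALID are the real entry points
 * defined in this file, and label->secid is the field used by
 * apparmor_secctx_to_secid() below.
 */
#if 0
/* hypothetical helper, for illustration only */
static int example_secid_lifecycle(struct aa_label *new_label,
				   struct aa_label *old_label)
{
	if (old_label) {
		/* replacement: inherit the secid of the label being
		 * replaced and repoint the mapping at the new label */
		new_label->secid = old_label->secid;
		aa_secid_update(new_label->secid, new_label);
	} else {
		/* brand new label: allocate a fresh secid */
		new_label->secid = aa_alloc_secid(new_label, GFP_KERNEL);
		if (new_label->secid == AA_SECID_INVALID)
			return -ENOSPC;
	}
	return 0;
}

/* hypothetical teardown: a destroyed label's secid goes back on the
 * free list for reuse */
static void example_secid_teardown(struct aa_label *label)
{
	aa_free_secid(label->secid);
}
#endif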
/**
 * aa_secid_to_label - look up the label a secid currently maps to
 * @secid: secid of the label to look up
 *
 * See label for the inverse, aa_label_to_secid
 *
 * Returns: the label @secid maps to, or NULL if @secid is not mapped
 */
struct aa_label *aa_secid_to_label(u32 secid)
{
	struct aa_label *label;

	rcu_read_lock();
	label = radix_tree_lookup(&aa_secids_map, secid);
	rcu_read_unlock();

	return label;
}

int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
	/* TODO: cache secctx and ref count so we don't have to recreate */
	struct aa_label *label = aa_secid_to_label(secid);
	int len;

	AA_BUG(!secdata);
	AA_BUG(!seclen);

	if (!label)
		return -EINVAL;

	if (secdata)
		len = aa_label_asxprint(secdata, root_ns, label,
					FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
					FLAG_HIDDEN_UNCONFINED |
					FLAG_ABS_ROOT, GFP_ATOMIC);
	else
		len = aa_label_snxprint(NULL, 0, root_ns, label,
					FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
					FLAG_HIDDEN_UNCONFINED |
					FLAG_ABS_ROOT);
	if (len < 0)
		return -ENOMEM;

	*seclen = len;

	return 0;
}

int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
	struct aa_label *label;

	label = aa_label_strn_parse(&root_ns->unconfined->label, secdata,
				    seclen, GFP_KERNEL, false, false);
	if (IS_ERR(label))
		return PTR_ERR(label);
	*secid = label->secid;

	return 0;
}

void apparmor_release_secctx(char *secdata, u32 seclen)
{
	kfree(secdata);
}

/**
 * aa_alloc_secid - allocate a new secid for a profile
 * @label: label the secid will map to
 * @gfp: memory allocation flags
 *
 * Returns: the new secid, or AA_SECID_INVALID on failure
 */
u32 aa_alloc_secid(struct aa_label *label, gfp_t gfp)
{
	/* declared at function scope so the "goto retry" below cannot
	 * jump past their initialization */
	bool preload = false;
	unsigned long flags;
	u32 secid;
	int res;

	/* racey, but at worst causes new allocation instead of reuse */
	if (free_list == FREE_LIST_HEAD) {
retry:
		if (gfpflags_allow_blocking(gfp) && !radix_tree_preload(gfp))
			preload = true;
		spin_lock_irqsave(&secid_lock, flags);
		if (alloced_secid != SECID_MAX) {
			secid = ++alloced_secid;
			res = radix_tree_insert(&aa_secids_map, secid, label);
			AA_BUG(res == -EEXIST);
		} else {
			secid = AA_SECID_INVALID;
		}
		spin_unlock_irqrestore(&secid_lock, flags);
		if (preload)
			radix_tree_preload_end();
	} else {
		spin_lock_irqsave(&secid_lock, flags);
		/* remove entry from free list */
		secid = __pop(label);
		if (secid == AA_SECID_INVALID) {
			spin_unlock_irqrestore(&secid_lock, flags);
			goto retry;
		}
		spin_unlock_irqrestore(&secid_lock, flags);
	}

	return secid;
}

/**
 * aa_free_secid - free a secid
 * @secid: secid to free
 */
void aa_free_secid(u32 secid)
{
	unsigned long flags;

	spin_lock_irqsave(&secid_lock, flags);
	__push(secid);
	spin_unlock_irqrestore(&secid_lock, flags);
}
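/*
 * Illustrative sketch (compiled out): round-tripping a secid through
 * the secctx hooks above. example_secctx_roundtrip() is a hypothetical
 * helper; the three apparmor_*secctx/secid functions it calls are the
 * ones defined in this file. Parsing the rendered context back should
 * normally yield the same secid, since both directions resolve to the
 * same label, but that is not guaranteed for every namespace view.
 */
#if 0
/* hypothetical helper, for illustration only */
static void example_secctx_roundtrip(u32 secid)
{
	char *ctx;
	u32 len, parsed;

	/* render the label behind @secid as a textual security context */
	if (apparmor_secid_to_secctx(secid, &ctx, &len))
		return;

	/* map the text back; a mismatch would indicate the label is not
	 * visible/parseable from the root namespace view used above */
	if (!apparmor_secctx_to_secid(ctx, len, &parsed) && parsed != secid)
		pr_warn("AppArmor: secid round-trip mismatch\n");

	apparmor_release_secctx(ctx, len);
}
#endif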